1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "DxeMain.h"
16 #include "Imem.h"
17 #include "HeapGuard.h"
18
19 //
20 // Global to avoid infinite reentrance of memory allocation when updating
21 // page table attributes, which may need to allocate pages for new PDE/PTE.
22 //
23 GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
24
25 //
26 // Pointer to table tracking the Guarded memory with a bitmap, in which '1'
27 // indicates guarded memory. '0' might be free memory or a Guard page
28 // itself, depending on the status of the memory adjacent to it.
29 //
30 GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
31
32 //
33 // Current depth level of the map table pointed to by mGuardedMemoryMap.
34 // mMapLevel must be initialized to at least 1. It will be updated
35 // automatically according to the address of the memory just tracked.
36 //
37 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
38
39 //
40 // Shift and mask for each level of map table
41 //
42 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
44 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
45 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
46
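//
// Note: the map works like a multi-level page table. Each non-leaf level
// holds 64-bit entries pointing to the next-level table, indexed by a slice
// of the physical address selected with mLevelShift/mLevelMask. Leaf entries
// are 64-bit bitmaps in which each bit tracks one page (EFI_PAGE_SIZE).
//
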
47 //
48 // Used for promoting freed but not used pages.
49 //
50 GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage = BASE_4GB;
51
52 /**
53 Set corresponding bits in bitmap table to 1 according to the address.
54
55 @param[in] Address Start address to set for.
56 @param[in] BitNumber Number of bits to set.
57 @param[in] BitMap Pointer to bitmap which covers the Address.
58
59 @return VOID.
60 **/
61 STATIC
62 VOID
63 SetBits (
64 IN EFI_PHYSICAL_ADDRESS Address,
65 IN UINTN BitNumber,
66 IN UINT64 *BitMap
67 )
68 {
69 UINTN Lsbs;
70 UINTN Qwords;
71 UINTN Msbs;
72 UINTN StartBit;
73 UINTN EndBit;
74
75 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
76 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
77
78 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
79 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
80 GUARDED_HEAP_MAP_ENTRY_BITS;
81 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
82 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
83 } else {
84 Msbs = BitNumber;
85 Lsbs = 0;
86 Qwords = 0;
87 }
88
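//
// Example: with 64-bit map entries, StartBit == 60 and BitNumber == 70 give
// Msbs == 4 (bits 60..63 of the first entry), Qwords == 1 (one full 64-bit
// entry) and Lsbs == 2 (bits 0..1 of the last entry): 4 + 64 + 2 == 70.
//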
89 if (Msbs > 0) {
90 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
91 BitMap += 1;
92 }
93
94 if (Qwords > 0) {
95 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
96 (UINT64)-1);
97 BitMap += Qwords;
98 }
99
100 if (Lsbs > 0) {
101 *BitMap |= (LShiftU64 (1, Lsbs) - 1);
102 }
103 }
104
105 /**
106 Set corresponding bits in bitmap table to 0 according to the address.
107
108 @param[in] Address Start address to set for.
109 @param[in] BitNumber Number of bits to set.
110 @param[in] BitMap Pointer to bitmap which covers the Address.
111
112 @return VOID.
113 **/
114 STATIC
115 VOID
116 ClearBits (
117 IN EFI_PHYSICAL_ADDRESS Address,
118 IN UINTN BitNumber,
119 IN UINT64 *BitMap
120 )
121 {
122 UINTN Lsbs;
123 UINTN Qwords;
124 UINTN Msbs;
125 UINTN StartBit;
126 UINTN EndBit;
127
128 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
129 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
130
131 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
132 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
133 GUARDED_HEAP_MAP_ENTRY_BITS;
134 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
135 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
136 } else {
137 Msbs = BitNumber;
138 Lsbs = 0;
139 Qwords = 0;
140 }
141
142 if (Msbs > 0) {
143 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
144 BitMap += 1;
145 }
146
147 if (Qwords > 0) {
148 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
149 BitMap += Qwords;
150 }
151
152 if (Lsbs > 0) {
153 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
154 }
155 }
156
157 /**
158 Get corresponding bits in bitmap table according to the address.
159
160 The value of bit 0 corresponds to the status of memory at given Address.
161 No more than 64 bits can be retrieved in one call.
162
163 @param[in] Address Start address to retrieve bits for.
164 @param[in] BitNumber Number of bits to get.
165 @param[in] BitMap Pointer to bitmap which covers the Address.
166
167 @return An integer containing the bits information.
168 **/
169 STATIC
170 UINT64
171 GetBits (
172 IN EFI_PHYSICAL_ADDRESS Address,
173 IN UINTN BitNumber,
174 IN UINT64 *BitMap
175 )
176 {
177 UINTN StartBit;
178 UINTN EndBit;
179 UINTN Lsbs;
180 UINTN Msbs;
181 UINT64 Result;
182
183 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
184
185 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
186 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
187
188 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
189 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
190 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
191 } else {
192 Msbs = BitNumber;
193 Lsbs = 0;
194 }
195
196 if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
197 Result = *BitMap;
198 } else {
199 Result = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
200 if (Lsbs > 0) {
201 BitMap += 1;
202 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
203 }
204 }
205
206 return Result;
207 }
208
209 /**
210 Locate, in the guarded memory bitmap tables, the pointer to the bitmap
211 that covers the given Address.
212
213 @param[in] Address Start address to search the bitmap for.
214 @param[in] AllocMapUnit Flag to indicate whether to allocate missing map table memory.
215 @param[out] BitMap Pointer to bitmap which covers the Address.
216
217 @return The number of bits from the given Address to the end of the current map unit.
218 **/
219 UINTN
220 FindGuardedMemoryMap (
221 IN EFI_PHYSICAL_ADDRESS Address,
222 IN BOOLEAN AllocMapUnit,
223 OUT UINT64 **BitMap
224 )
225 {
226 UINTN Level;
227 UINT64 *GuardMap;
228 UINT64 MapMemory;
229 UINTN Index;
230 UINTN Size;
231 UINTN BitsToUnitEnd;
232 EFI_STATUS Status;
233
234 //
235 // Adjust current map table depth according to the address to access
236 //
237 while (AllocMapUnit &&
238 mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
239 RShiftU64 (
240 Address,
241 mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
242 ) != 0) {
243
244 if (mGuardedMemoryMap != 0) {
245 Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
246 * GUARDED_HEAP_MAP_ENTRY_BYTES;
247 Status = CoreInternalAllocatePages (
248 AllocateAnyPages,
249 EfiBootServicesData,
250 EFI_SIZE_TO_PAGES (Size),
251 &MapMemory,
252 FALSE
253 );
254 ASSERT_EFI_ERROR (Status);
255 ASSERT (MapMemory != 0);
256
257 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
258
259 *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
260 mGuardedMemoryMap = MapMemory;
261 }
262
263 mMapLevel++;
264
265 }
266
267 GuardMap = &mGuardedMemoryMap;
268 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
269 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
270 ++Level) {
271
272 if (*GuardMap == 0) {
273 if (!AllocMapUnit) {
274 GuardMap = NULL;
275 break;
276 }
277
278 Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
279 Status = CoreInternalAllocatePages (
280 AllocateAnyPages,
281 EfiBootServicesData,
282 EFI_SIZE_TO_PAGES (Size),
283 &MapMemory,
284 FALSE
285 );
286 ASSERT_EFI_ERROR (Status);
287 ASSERT (MapMemory != 0);
288
289 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
290 *GuardMap = MapMemory;
291 }
292
293 Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
294 Index &= mLevelMask[Level];
295 GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
296
297 }
298
299 BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
300 *BitMap = GuardMap;
301
302 return BitsToUnitEnd;
303 }
304
305 /**
306 Set corresponding bits in bitmap table to 1 according to given memory range.
307
308 @param[in] Address Memory address to guard from.
309 @param[in] NumberOfPages Number of pages to guard.
310
311 @return VOID.
312 **/
313 VOID
314 EFIAPI
315 SetGuardedMemoryBits (
316 IN EFI_PHYSICAL_ADDRESS Address,
317 IN UINTN NumberOfPages
318 )
319 {
320 UINT64 *BitMap;
321 UINTN Bits;
322 UINTN BitsToUnitEnd;
323
324 while (NumberOfPages > 0) {
325 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
326 ASSERT (BitMap != NULL);
327
328 if (NumberOfPages > BitsToUnitEnd) {
329 // Cross map unit
330 Bits = BitsToUnitEnd;
331 } else {
332 Bits = NumberOfPages;
333 }
334
335 SetBits (Address, Bits, BitMap);
336
337 NumberOfPages -= Bits;
338 Address += EFI_PAGES_TO_SIZE (Bits);
339 }
340 }
341
342 /**
343 Clear corresponding bits in bitmap table according to given memory range.
344
345 @param[in] Address Memory address to unset from.
346 @param[in] NumberOfPages Number of pages to unset guard.
347
348 @return VOID.
349 **/
350 VOID
351 EFIAPI
352 ClearGuardedMemoryBits (
353 IN EFI_PHYSICAL_ADDRESS Address,
354 IN UINTN NumberOfPages
355 )
356 {
357 UINT64 *BitMap;
358 UINTN Bits;
359 UINTN BitsToUnitEnd;
360
361 while (NumberOfPages > 0) {
362 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
363 ASSERT (BitMap != NULL);
364
365 if (NumberOfPages > BitsToUnitEnd) {
366 // Cross map unit
367 Bits = BitsToUnitEnd;
368 } else {
369 Bits = NumberOfPages;
370 }
371
372 ClearBits (Address, Bits, BitMap);
373
374 NumberOfPages -= Bits;
375 Address += EFI_PAGES_TO_SIZE (Bits);
376 }
377 }
378
379 /**
380 Retrieve corresponding bits in bitmap table according to given memory range.
381
382 @param[in] Address Memory address to retrieve from.
383 @param[in] NumberOfPages Number of pages to retrieve.
384
385 @return An integer containing the guarded memory bitmap.
386 **/
387 UINT64
388 GetGuardedMemoryBits (
389 IN EFI_PHYSICAL_ADDRESS Address,
390 IN UINTN NumberOfPages
391 )
392 {
393 UINT64 *BitMap;
394 UINTN Bits;
395 UINT64 Result;
396 UINTN Shift;
397 UINTN BitsToUnitEnd;
398
399 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
400
401 Result = 0;
402 Shift = 0;
403 while (NumberOfPages > 0) {
404 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
405
406 if (NumberOfPages > BitsToUnitEnd) {
407 // Cross map unit
408 Bits = BitsToUnitEnd;
409 } else {
410 Bits = NumberOfPages;
411 }
412
413 if (BitMap != NULL) {
414 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
415 }
416
417 Shift += Bits;
418 NumberOfPages -= Bits;
419 Address += EFI_PAGES_TO_SIZE (Bits);
420 }
421
422 return Result;
423 }
424
425 /**
426 Get bit value in bitmap table for the given address.
427
428 @param[in] Address The address to retrieve for.
429
430 @return 1 or 0.
431 **/
432 UINTN
433 EFIAPI
434 GetGuardMapBit (
435 IN EFI_PHYSICAL_ADDRESS Address
436 )
437 {
438 UINT64 *GuardMap;
439
440 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
441 if (GuardMap != NULL) {
442 if (RShiftU64 (*GuardMap,
443 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
444 return 1;
445 }
446 }
447
448 return 0;
449 }
450
451
452 /**
453 Check to see if the page at the given address is a Guard page or not.
454
455 @param[in] Address The address to check for.
456
457 @return TRUE The page at Address is a Guard page.
458 @return FALSE The page at Address is not a Guard page.
459 **/
460 BOOLEAN
461 EFIAPI
462 IsGuardPage (
463 IN EFI_PHYSICAL_ADDRESS Address
464 )
465 {
466 UINTN BitMap;
467
468 //
469 // There must be at least one guarded page before and/or after given
470 // address if it's a Guard page. The bitmap pattern should be one of
471 // 001, 100 and 101
472 //
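// (In this 3-bit pattern, bit 0 is the page before the given Address, bit 1
// is the page at Address itself, and bit 2 is the page right after it; a
// Guard page is never marked in the bitmap, hence bit 1 must be 0.)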
473 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
474 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
475 }
476
477
478 /**
479 Check to see if the page at the given address is guarded or not.
480
481 @param[in] Address The address to check for.
482
483 @return TRUE The page at Address is guarded.
484 @return FALSE The page at Address is not guarded.
485 **/
486 BOOLEAN
487 EFIAPI
488 IsMemoryGuarded (
489 IN EFI_PHYSICAL_ADDRESS Address
490 )
491 {
492 return (GetGuardMapBit (Address) == 1);
493 }
494
495 /**
496 Set the page at the given address to be a Guard page.
497
498 This is done by changing the page table attribute to be NOT PRESENT.
499
500 @param[in] BaseAddress Page address to Guard at
501
502 @return VOID
503 **/
504 VOID
505 EFIAPI
506 SetGuardPage (
507 IN EFI_PHYSICAL_ADDRESS BaseAddress
508 )
509 {
510 EFI_STATUS Status;
511
512 if (gCpu == NULL) {
513 return;
514 }
515
516 //
517 // Set the flag to make sure memory for page table operations is allocated
518 // without Guard; otherwise infinite loops could occur.
519 //
520 mOnGuarding = TRUE;
521 //
522 // Note: This might overwrite other attributes needed by other features,
523 // such as NX memory protection.
524 //
525 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
526 ASSERT_EFI_ERROR (Status);
527 mOnGuarding = FALSE;
528 }
529
530 /**
531 Unset the Guard page at the given address back to normal memory.
532
533 This is done by changing the page table attribute to be PRESENT.
534
535 @param[in] BaseAddress Page address to unset the Guard at.
536
537 @return VOID.
538 **/
539 VOID
540 EFIAPI
541 UnsetGuardPage (
542 IN EFI_PHYSICAL_ADDRESS BaseAddress
543 )
544 {
545 UINT64 Attributes;
546 EFI_STATUS Status;
547
548 if (gCpu == NULL) {
549 return;
550 }
551
552 //
553 // Once the Guard page is unset, it will be freed back to memory pool. NX
554 // memory protection must be restored for this page if NX is enabled for free
555 // memory.
556 //
557 Attributes = 0;
558 if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
559 Attributes |= EFI_MEMORY_XP;
560 }
561
562 //
563 // Set the flag to make sure memory for page table operations is allocated
564 // without Guard; otherwise infinite loops could occur.
565 //
566 mOnGuarding = TRUE;
567 //
568 // Note: This might overwrite other attributes needed by other features,
569 // such as memory protection (NX). Please make sure they are not enabled
570 // at the same time.
571 //
572 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
573 ASSERT_EFI_ERROR (Status);
574 mOnGuarding = FALSE;
575 }
576
577 /**
578 Check to see if the memory at the given address should be guarded or not.
579
580 @param[in] MemoryType Memory type to check.
581 @param[in] AllocateType Allocation type to check.
582 @param[in] PageOrPool Indicate a page allocation or pool allocation.
583
584
585 @return TRUE The given type of memory should be guarded.
586 @return FALSE The given type of memory should not be guarded.
587 **/
588 BOOLEAN
589 IsMemoryTypeToGuard (
590 IN EFI_MEMORY_TYPE MemoryType,
591 IN EFI_ALLOCATE_TYPE AllocateType,
592 IN UINT8 PageOrPool
593 )
594 {
595 UINT64 TestBit;
596 UINT64 ConfigBit;
597
598 if (AllocateType == AllocateAddress) {
599 return FALSE;
600 }
601
602 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
603 return FALSE;
604 }
605
606 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
607 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
608 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
609 ConfigBit = PcdGet64 (PcdHeapGuardPageType);
610 } else {
611 ConfigBit = (UINT64)-1;
612 }
613
614 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
615 TestBit = BIT63;
616 } else if ((UINT32)MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
617 TestBit = BIT62;
618 } else if (MemoryType < EfiMaxMemoryType) {
619 TestBit = LShiftU64 (1, MemoryType);
620 } else if (MemoryType == EfiMaxMemoryType) {
621 TestBit = (UINT64)-1;
622 } else {
623 TestBit = 0;
624 }
625
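//
// Example: if PcdHeapGuardPageType is set to BIT4, page allocations of type
// EfiBootServicesData (enum value 4, hence TestBit == BIT4) are guarded,
// while other memory types are not.
//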
626 return ((ConfigBit & TestBit) != 0);
627 }
628
629 /**
630 Check to see if the pool at the given address should be guarded or not.
631
632 @param[in] MemoryType Pool type to check.
633
634
635 @return TRUE The given type of pool should be guarded.
636 @return FALSE The given type of pool should not be guarded.
637 **/
638 BOOLEAN
639 IsPoolTypeToGuard (
640 IN EFI_MEMORY_TYPE MemoryType
641 )
642 {
643 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
644 GUARD_HEAP_TYPE_POOL);
645 }
646
647 /**
648 Check to see if the page at the given address should be guarded or not.
649
650 @param[in] MemoryType Page type to check.
651 @param[in] AllocateType Allocation type to check.
652
653 @return TRUE The given type of page should be guarded.
654 @return FALSE The given type of page should not be guarded.
655 **/
656 BOOLEAN
657 IsPageTypeToGuard (
658 IN EFI_MEMORY_TYPE MemoryType,
659 IN EFI_ALLOCATE_TYPE AllocateType
660 )
661 {
662 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
663 }
664
665 /**
666 Check to see if the heap guard is enabled for page and/or pool allocation.
667
668 @param[in] GuardType Specify the sub-type(s) of Heap Guard.
669
670 @return TRUE/FALSE.
671 **/
672 BOOLEAN
673 IsHeapGuardEnabled (
674 UINT8 GuardType
675 )
676 {
677 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);
678 }
679
680 /**
681 Set head Guard and tail Guard for the given memory range.
682
683 @param[in] Memory Base address of memory to set guard for.
684 @param[in] NumberOfPages Memory size in pages.
685
686 @return VOID
687 **/
688 VOID
689 SetGuardForMemory (
690 IN EFI_PHYSICAL_ADDRESS Memory,
691 IN UINTN NumberOfPages
692 )
693 {
694 EFI_PHYSICAL_ADDRESS GuardPage;
695
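//
// Resulting layout, e.g. for NumberOfPages == 2 (assuming 4KB pages):
//
//   Memory - 4K : head Guard (not-present)
//   Memory      : usable page 0
//   Memory + 4K : usable page 1
//   Memory + 8K : tail Guard (not-present)
//
// Only the usable pages are marked in the guarded-memory bitmap; the Guard
// pages themselves stay unmarked.
//
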
696 //
697 // Set tail Guard
698 //
699 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
700 if (!IsGuardPage (GuardPage)) {
701 SetGuardPage (GuardPage);
702 }
703
704 // Set head Guard
705 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
706 if (!IsGuardPage (GuardPage)) {
707 SetGuardPage (GuardPage);
708 }
709
710 //
711 // Mark the memory range as Guarded
712 //
713 SetGuardedMemoryBits (Memory, NumberOfPages);
714 }
715
716 /**
717 Unset head Guard and tail Guard for the given memory range.
718
719 @param[in] Memory Base address of memory to unset guard for.
720 @param[in] NumberOfPages Memory size in pages.
721
722 @return VOID
723 **/
724 VOID
725 UnsetGuardForMemory (
726 IN EFI_PHYSICAL_ADDRESS Memory,
727 IN UINTN NumberOfPages
728 )
729 {
730 EFI_PHYSICAL_ADDRESS GuardPage;
731 UINT64 GuardBitmap;
732
733 if (NumberOfPages == 0) {
734 return;
735 }
736
737 //
738 // Head Guard must be one page before, if any.
739 //
740 // MSB-> 1 0 <-LSB
741 // -------------------
742 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
743 // Head Guard -> 0 0 -> Free Head Guard as well (not shared Guard)
744 // 1 X -> Don't free first page (need a new Guard)
745 // (it'll be turned into a Guard page later)
746 // -------------------
747 // Start -> -1 -2
748 //
749 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
750 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
751 if ((GuardBitmap & BIT1) == 0) {
752 //
753 // Head Guard exists.
754 //
755 if ((GuardBitmap & BIT0) == 0) {
756 //
757 // If the head Guard is not a tail Guard of adjacent memory block,
758 // unset it.
759 //
760 UnsetGuardPage (GuardPage);
761 }
762 } else {
763 //
764 // Pages before memory to free are still in Guard. It's a partial free
765 // case. Turn first page of memory block to free into a new Guard.
766 //
767 SetGuardPage (Memory);
768 }
769
770 //
771 // Tail Guard must be the page after this memory block to free, if any.
772 //
773 // MSB-> 1 0 <-LSB
774 // --------------------
775 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
776 // 0 0 <- Tail Guard -> Free Tail Guard as well (not shared Guard)
777 // X 1 -> Don't free last page (need a new Guard)
778 // (it'll be turned into a Guard page later)
779 // --------------------
780 // +1 +0 <- End
781 //
782 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
783 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
784 if ((GuardBitmap & BIT0) == 0) {
785 //
786 // Tail Guard exists.
787 //
788 if ((GuardBitmap & BIT1) == 0) {
789 //
790 // If the tail Guard is not a head Guard of adjacent memory block,
791 // free it; otherwise, keep it.
792 //
793 UnsetGuardPage (GuardPage);
794 }
795 } else {
796 //
797 // Pages after memory to free are still in Guard. It's a partial free
798 // case. We need to keep one page to be a head Guard.
799 //
800 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
801 }
802
803 //
804 // No matter what, we just clear the mark of the Guarded memory.
805 //
806 ClearGuardedMemoryBits(Memory, NumberOfPages);
807 }
808
809 /**
810 Adjust address of free memory according to existing and/or required Guard.
811
812 This function will check if there are existing Guard pages of adjacent
813 memory blocks, and try to use them as the Guard pages of the memory to be
814 allocated.
815
816 @param[in] Start Start address of free memory block.
817 @param[in] Size Size of free memory block.
818 @param[in] SizeRequested Size of memory to allocate.
819
820 @return The end address of memory block found.
821 @return 0 if there is not enough space for the required size of memory and its Guard.
822 **/
823 UINT64
824 AdjustMemoryS (
825 IN UINT64 Start,
826 IN UINT64 Size,
827 IN UINT64 SizeRequested
828 )
829 {
830 UINT64 Target;
831
832 //
833 // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
834 // indicated to put the pool near the Tail Guard, we need extra bytes to
835 // make sure the returned pool address is properly aligned.
836 //
837 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
838 SizeRequested = ALIGN_VALUE(SizeRequested, 8);
839 }
840
841 Target = Start + Size - SizeRequested;
842 ASSERT (Target >= Start);
843 if (Target == 0) {
844 return 0;
845 }
846
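//
// Worked example (4KB pages): Start == 0x100000, Size == 0x5000 and
// SizeRequested == 0x2000 give Target == 0x103000. If the page at
// Start + Size (0x105000) is already a Guard, it is shared and the call
// returns 0x104FFF; otherwise Target drops to 0x102000 to leave room for a
// new tail Guard page.
//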
847 if (!IsGuardPage (Start + Size)) {
848 // No Guard at tail to share. One more page is needed.
849 Target -= EFI_PAGES_TO_SIZE (1);
850 }
851
852 // Out of range?
853 if (Target < Start) {
854 return 0;
855 }
856
857 // At the edge?
858 if (Target == Start) {
859 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
860 // Not enough space for a new head Guard if there's no Guard at the head to share.
861 return 0;
862 }
863 }
864
865 // OK, we have enough pages for memory and its Guards. Return the End of the
866 // free space.
867 return Target + SizeRequested - 1;
868 }
869
870 /**
871 Adjust the start address and number of pages to free according to Guard.
872
873 The purpose of this function is to keep the shared Guard page with the
874 adjacent memory block if it's still guarded, or free it if it's no longer
875 shared. Another purpose is to reserve pages as Guard pages on a partial free.
876
877 @param[in,out] Memory Base address of memory to free.
878 @param[in,out] NumberOfPages Size of memory to free.
879
880 @return VOID.
881 **/
882 VOID
883 AdjustMemoryF (
884 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
885 IN OUT UINTN *NumberOfPages
886 )
887 {
888 EFI_PHYSICAL_ADDRESS Start;
889 EFI_PHYSICAL_ADDRESS MemoryToTest;
890 UINTN PagesToFree;
891 UINT64 GuardBitmap;
892
893 if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
894 return;
895 }
896
897 Start = *Memory;
898 PagesToFree = *NumberOfPages;
899
900 //
901 // Head Guard must be one page before, if any.
902 //
903 // MSB-> 1 0 <-LSB
904 // -------------------
905 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
906 // Head Guard -> 0 0 -> Free Head Guard as well (not shared Guard)
907 // 1 X -> Don't free first page (need a new Guard)
908 // (it'll be turned into a Guard page later)
909 // -------------------
910 // Start -> -1 -2
911 //
912 MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
913 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
914 if ((GuardBitmap & BIT1) == 0) {
915 //
916 // Head Guard exists.
917 //
918 if ((GuardBitmap & BIT0) == 0) {
919 //
920 // If the head Guard is not a tail Guard of adjacent memory block,
921 // free it; otherwise, keep it.
922 //
923 Start -= EFI_PAGES_TO_SIZE (1);
924 PagesToFree += 1;
925 }
926 } else {
927 //
928 // No Head Guard, and pages before memory to free are still in Guard. It's a
929 // partial free case. We need to keep one page to be a tail Guard.
930 //
931 Start += EFI_PAGES_TO_SIZE (1);
932 PagesToFree -= 1;
933 }
934
935 //
936 // Tail Guard must be the page after this memory block to free, if any.
937 //
938 // MSB-> 1 0 <-LSB
939 // --------------------
940 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
941 // 0 0 <- Tail Guard -> Free Tail Guard as well (not shared Guard)
942 // X 1 -> Don't free last page (need a new Guard)
943 // (it'll be turned into a Guard page later)
944 // --------------------
945 // +1 +0 <- End
946 //
947 MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
948 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
949 if ((GuardBitmap & BIT0) == 0) {
950 //
951 // Tail Guard exists.
952 //
953 if ((GuardBitmap & BIT1) == 0) {
954 //
955 // If the tail Guard is not a head Guard of adjacent memory block,
956 // free it; otherwise, keep it.
957 //
958 PagesToFree += 1;
959 }
960 } else if (PagesToFree > 0) {
961 //
962 // No Tail Guard, and pages after memory to free are still in Guard. It's a
963 // partial free case. We need to keep one page to be a head Guard.
964 //
965 PagesToFree -= 1;
966 }
967
968 *Memory = Start;
969 *NumberOfPages = PagesToFree;
970 }
971
972 /**
973 Adjust the base and number of pages to really allocate according to Guard.
974
975 @param[in,out] Memory Base address of free memory.
976 @param[in,out] NumberOfPages Size of memory to allocate.
977
978 @return VOID.
979 **/
980 VOID
981 AdjustMemoryA (
982 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
983 IN OUT UINTN *NumberOfPages
984 )
985 {
986 //
987 // FindFreePages() has already taken the Guard into account. It's safe to
988 // adjust the start address and/or number of pages here, to make sure that
989 // the Guards are also "allocated".
990 //
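// For example (4KB pages), a request of 1 page at 0x2000 with no Guards
// nearby grows to 3 pages starting at 0x1000, so the head Guard, the data
// page and the tail Guard are all "allocated" together.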
991 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
992 // No tail Guard, add one.
993 *NumberOfPages += 1;
994 }
995
996 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
997 // No head Guard, add one.
998 *Memory -= EFI_PAGE_SIZE;
999 *NumberOfPages += 1;
1000 }
1001 }
1002
1003 /**
1004 Adjust the pool head position to make sure the Guard page is adjacent to
1005 the pool tail or pool head.
1006
1007 @param[in] Memory Base address of memory allocated.
1008 @param[in] NoPages Number of pages actually allocated.
1009 @param[in] Size Size of memory requested.
1010 (plus pool head/tail overhead)
1011
1012 @return Address of pool head.
1013 **/
1014 VOID *
1015 AdjustPoolHeadA (
1016 IN EFI_PHYSICAL_ADDRESS Memory,
1017 IN UINTN NoPages,
1018 IN UINTN Size
1019 )
1020 {
1021 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1022 //
1023 // Pool head is put near the head Guard
1024 //
1025 return (VOID *)(UINTN)Memory;
1026 }
1027
1028 //
1029 // Pool head is put near the tail Guard
1030 //
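// E.g. NoPages == 2 and Size == 0x30 place the pool head at
// Memory + 0x2000 - 0x30, so the byte right after the pool tail is the
// first byte of the tail Guard page.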
1031 Size = ALIGN_VALUE (Size, 8);
1032 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1033 }
1034
1035 /**
1036 Get the page base address according to pool head address.
1037
1038 @param[in] Memory Head address of pool to free.
1039
1040 @return Base address of the pages allocated for the pool.
1041 **/
1042 VOID *
1043 AdjustPoolHeadF (
1044 IN EFI_PHYSICAL_ADDRESS Memory
1045 )
1046 {
1047 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1048 //
1049 // Pool head is put near the head Guard
1050 //
1051 return (VOID *)(UINTN)Memory;
1052 }
1053
1054 //
1055 // Pool head is put near the tail Guard
1056 //
1057 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1058 }
1059
1060 /**
1061 Allocate or free guarded memory.
1062
1063 @param[in] Start Start address of memory to allocate or free.
1064 @param[in] NumberOfPages Memory size in pages.
1065 @param[in] NewType Memory type to convert to.
1066
1067 @return EFI_STATUS returned by CoreConvertPages().
1068 **/
1069 EFI_STATUS
1070 CoreConvertPagesWithGuard (
1071 IN UINT64 Start,
1072 IN UINTN NumberOfPages,
1073 IN EFI_MEMORY_TYPE NewType
1074 )
1075 {
1076 UINT64 OldStart;
1077 UINTN OldPages;
1078
1079 if (NewType == EfiConventionalMemory) {
1080 OldStart = Start;
1081 OldPages = NumberOfPages;
1082
1083 AdjustMemoryF (&Start, &NumberOfPages);
1084 //
1085 // It's safe to unset the Guard page inside the memory lock because no
1086 // memory allocation should occur while updating memory page attributes at
1087 // this point. Unsetting the Guard page before the free also prevents a
1088 // Guard page just freed back to the pool from being allocated right away,
1089 // before it's marked usable (from non-present to present).
1090 //
1091 UnsetGuardForMemory (OldStart, OldPages);
1092 if (NumberOfPages == 0) {
1093 return EFI_SUCCESS;
1094 }
1095 } else {
1096 AdjustMemoryA (&Start, &NumberOfPages);
1097 }
1098
1099 return CoreConvertPages (Start, NumberOfPages, NewType);
1100 }
1101
1102 /**
1103 Set all Guard pages which could not be set before the CPU Arch Protocol was installed.
1104 **/
1105 VOID
1106 SetAllGuardPages (
1107 VOID
1108 )
1109 {
1110 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1111 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1112 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1113 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1114 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1115 UINT64 TableEntry;
1116 UINT64 Address;
1117 UINT64 GuardPage;
1118 INTN Level;
1119 UINTN Index;
1120 BOOLEAN OnGuarding;
1121
1122 if (mGuardedMemoryMap == 0 ||
1123 mMapLevel == 0 ||
1124 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1125 return;
1126 }
1127
1128 CopyMem (Entries, mLevelMask, sizeof (Entries));
1129 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1130
1131 SetMem (Tables, sizeof(Tables), 0);
1132 SetMem (Addresses, sizeof(Addresses), 0);
1133 SetMem (Indices, sizeof(Indices), 0);
1134
1135 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1136 Tables[Level] = mGuardedMemoryMap;
1137 Address = 0;
1138 OnGuarding = FALSE;
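
//
// Walk the map iteratively, depth-first: Tables[], Addresses[] and Indices[]
// act as an explicit stack with one slot per table level, avoiding recursion.
//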
1139
1140 DEBUG_CODE (
1141 DumpGuardedMemoryBitmap ();
1142 );
1143
1144 while (TRUE) {
1145 if (Indices[Level] > Entries[Level]) {
1146 Tables[Level] = 0;
1147 Level -= 1;
1148 } else {
1149
1150 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
1151 Address = Addresses[Level];
1152
1153 if (TableEntry == 0) {
1154
1155 OnGuarding = FALSE;
1156
1157 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1158
1159 Level += 1;
1160 Tables[Level] = TableEntry;
1161 Addresses[Level] = Address;
1162 Indices[Level] = 0;
1163
1164 continue;
1165
1166 } else {
1167
1168 Index = 0;
1169 while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
1170 if ((TableEntry & 1) == 1) {
1171 if (OnGuarding) {
1172 GuardPage = 0;
1173 } else {
1174 GuardPage = Address - EFI_PAGE_SIZE;
1175 }
1176 OnGuarding = TRUE;
1177 } else {
1178 if (OnGuarding) {
1179 GuardPage = Address;
1180 } else {
1181 GuardPage = 0;
1182 }
1183 OnGuarding = FALSE;
1184 }
1185
1186 if (GuardPage != 0) {
1187 SetGuardPage (GuardPage);
1188 }
1189
1190 if (TableEntry == 0) {
1191 break;
1192 }
1193
1194 TableEntry = RShiftU64 (TableEntry, 1);
1195 Address += EFI_PAGE_SIZE;
1196 Index += 1;
1197 }
1198 }
1199 }
1200
1201 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1202 break;
1203 }
1204
1205 Indices[Level] += 1;
1206 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1207 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1208
1209 }
1210 }
1211
1212 /**
1213 Find the address of top-most guarded free page.
1214
1215 @param[out] Address Start address of top-most guarded free page.
1216
1217 @return VOID.
1218 **/
1219 VOID
1220 GetLastGuardedFreePageAddress (
1221 OUT EFI_PHYSICAL_ADDRESS *Address
1222 )
1223 {
1224 EFI_PHYSICAL_ADDRESS AddressGranularity;
1225 EFI_PHYSICAL_ADDRESS BaseAddress;
1226 UINTN Level;
1227 UINT64 Map;
1228 INTN Index;
1229
1230 ASSERT (mMapLevel >= 1);
1231
1232 BaseAddress = 0;
1233 Map = mGuardedMemoryMap;
1234 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1235 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
1236 ++Level) {
1237 AddressGranularity = LShiftU64 (1, mLevelShift[Level]);
1238
1239 //
1240 // Find the non-NULL entry at largest index.
1241 //
1242 for (Index = (INTN)mLevelMask[Level]; Index >= 0 ; --Index) {
1243 if (((UINT64 *)(UINTN)Map)[Index] != 0) {
1244 BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
1245 Map = ((UINT64 *)(UINTN)Map)[Index];
1246 break;
1247 }
1248 }
1249 }
1250
1251 //
1252 // Find the non-zero MSB then get the page address.
1253 //
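// (E.g. Map == 0x05 shifts right three times, leaving BaseAddress one page
// past the highest guarded page, i.e. the end of the top-most freed range.)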
1254 while (Map != 0) {
1255 Map = RShiftU64 (Map, 1);
1256 BaseAddress += EFI_PAGES_TO_SIZE (1);
1257 }
1258
1259 *Address = BaseAddress;
1260 }
1261
1262 /**
1263 Record freed pages.
1264
1265 @param[in] BaseAddress Base address of just freed pages.
1266 @param[in] Pages Number of freed pages.
1267
1268 @return VOID.
1269 **/
1270 VOID
1271 MarkFreedPages (
1272 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1273 IN UINTN Pages
1274 )
1275 {
1276 SetGuardedMemoryBits (BaseAddress, Pages);
1277 }
1278
1279 /**
1280 Record freed pages as well as mark them as not-present.
1281
1282 @param[in] BaseAddress Base address of just freed pages.
1283 @param[in] Pages Number of freed pages.
1284
1285 @return VOID.
1286 **/
1287 VOID
1288 EFIAPI
1289 GuardFreedPages (
1290 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1291 IN UINTN Pages
1292 )
1293 {
1294 EFI_STATUS Status;
1295
1296 //
1297 // Legacy memory lower than 1MB might be accessed without any allocation.
1298 // Leave it alone.
1299 //
1300 if (BaseAddress < BASE_1MB) {
1301 return;
1302 }
1303
1304 MarkFreedPages (BaseAddress, Pages);
1305 if (gCpu != NULL) {
1306 //
1307 // Set the flag to make sure memory for page table operations is allocated
1308 // without Guard; otherwise infinite loops could occur.
1309 //
1310 mOnGuarding = TRUE;
1311 //
1312 // Note: This might overwrite other attributes needed by other features,
1313 // such as NX memory protection.
1314 //
1315 Status = gCpu->SetMemoryAttributes (
1316 gCpu,
1317 BaseAddress,
1318 EFI_PAGES_TO_SIZE (Pages),
1319 EFI_MEMORY_RP
1320 );
1321 //
1322 // Normally we should ASSERT the returned Status. But there might be memory
1323 // alloc/free involved in SetMemoryAttributes(), which might cause this call
1324 // to fail. It's a rare case, so it's OK to leave a few tiny holes unguarded.
1325 //
1326 if (EFI_ERROR (Status)) {
1327 DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));
1328 }
1329 mOnGuarding = FALSE;
1330 }
1331 }
1332
1333 /**
1334 Record freed pages as well as mark them as not-present, if enabled.
1335
1336 @param[in] BaseAddress Base address of just freed pages.
1337 @param[in] Pages Number of freed pages.
1338
1339 @return VOID.
1340 **/
1341 VOID
1342 EFIAPI
1343 GuardFreedPagesChecked (
1344 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1345 IN UINTN Pages
1346 )
1347 {
1348 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1349 GuardFreedPages (BaseAddress, Pages);
1350 }
1351 }
1352
1353 /**
1354 Mark all pages freed before the CPU Arch Protocol was installed as not-present.
1355
1356 **/
1357 VOID
1358 GuardAllFreedPages (
1359 VOID
1360 )
1361 {
1362 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1363 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1364 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1365 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1366 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1367 UINT64 TableEntry;
1368 UINT64 Address;
1369 UINT64 GuardPage;
1370 INTN Level;
1371 UINTN BitIndex;
1372 UINTN GuardPageNumber;
1373
1374 if (mGuardedMemoryMap == 0 ||
1375 mMapLevel == 0 ||
1376 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1377 return;
1378 }
1379
1380 CopyMem (Entries, mLevelMask, sizeof (Entries));
1381 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1382
1383 SetMem (Tables, sizeof(Tables), 0);
1384 SetMem (Addresses, sizeof(Addresses), 0);
1385 SetMem (Indices, sizeof(Indices), 0);
1386
1387 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1388 Tables[Level] = mGuardedMemoryMap;
1389 Address = 0;
1390 GuardPage = (UINT64)-1;
1391 GuardPageNumber = 0;
1392
1393 while (TRUE) {
1394 if (Indices[Level] > Entries[Level]) {
1395 Tables[Level] = 0;
1396 Level -= 1;
1397 } else {
1398 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
1399 Address = Addresses[Level];
1400
1401 if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1402 Level += 1;
1403 Tables[Level] = TableEntry;
1404 Addresses[Level] = Address;
1405 Indices[Level] = 0;
1406
1407 continue;
1408 } else {
1409 BitIndex = 1;
1410 while (BitIndex != 0) {
1411 if ((TableEntry & BitIndex) != 0) {
1412 if (GuardPage == (UINT64)-1) {
1413 GuardPage = Address;
1414 }
1415 ++GuardPageNumber;
1416 } else if (GuardPageNumber > 0) {
1417 GuardFreedPages (GuardPage, GuardPageNumber);
1418 GuardPageNumber = 0;
1419 GuardPage = (UINT64)-1;
1420 }
1421
1422 if (TableEntry == 0) {
1423 break;
1424 }
1425
1426 Address += EFI_PAGES_TO_SIZE (1);
1427 BitIndex = LShiftU64 (BitIndex, 1);
1428 }
1429 }
1430 }
1431
1432 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1433 break;
1434 }
1435
1436 Indices[Level] += 1;
1437 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1438 Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
1439
1440 }
1441
1442 //
1443 // Update the maximum address of freed page which can be used for memory
1444 // promotion upon out-of-memory-space.
1445 //
1446 GetLastGuardedFreePageAddress (&Address);
1447 if (Address != 0) {
1448 mLastPromotedPage = Address;
1449 }
1450 }
1451
1452 /**
1453 This function checks to see if the given memory map descriptor in a memory map
1454 can be merged with any guarded free pages.
1455
1456 @param MemoryMapEntry A pointer to a descriptor in MemoryMap.
1457 @param MaxAddress Maximum address to stop the merge.
1458
1459 @return VOID
1460
1461 **/
1462 VOID
1463 MergeGuardPages (
1464 IN EFI_MEMORY_DESCRIPTOR *MemoryMapEntry,
1465 IN EFI_PHYSICAL_ADDRESS MaxAddress
1466 )
1467 {
1468 EFI_PHYSICAL_ADDRESS EndAddress;
1469 UINT64 Bitmap;
1470 INTN Pages;
1471
1472 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
1473 MemoryMapEntry->Type >= EfiMemoryMappedIO) {
1474 return;
1475 }
1476
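//
// E.g. if the descriptor ends at 0x200000 and the next three pages were
// freed-then-guarded, the loop below grows NumberOfPages by 3 so the memory
// map reports one contiguous region.
//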
1477 Bitmap = 0;
1478 Pages = EFI_SIZE_TO_PAGES (MaxAddress - MemoryMapEntry->PhysicalStart);
1479 Pages -= MemoryMapEntry->NumberOfPages;
1480 while (Pages > 0) {
1481 if (Bitmap == 0) {
1482 EndAddress = MemoryMapEntry->PhysicalStart +
1483 EFI_PAGES_TO_SIZE (MemoryMapEntry->NumberOfPages);
1484 Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
1485 }
1486
1487 if ((Bitmap & 1) == 0) {
1488 break;
1489 }
1490
1491 Pages--;
1492 MemoryMapEntry->NumberOfPages++;
1493 Bitmap = RShiftU64 (Bitmap, 1);
1494 }
1495 }
1496
1497 /**
1498 Put part (at most 64 pages at a time) of the guarded free pages back into the free page pool.
1499
1500 The freed-memory guard is used to detect Use-After-Free (UAF) memory issues. It
1501 takes a 'use then throw away' approach to detect any illegal access to freed
1502 memory. The thrown-away memory will be marked as not-present so that any access
1503 to that memory (after free) will be caught by a page-fault exception.
1504
1505 The problem is that this consumes a lot of memory space. Once no memory is
1506 left in the pool to allocate, we have to restore part of the freed pages to
1507 their normal function. Otherwise the whole system will stop working.
1508
1509 @param StartAddress Start address of promoted memory.
1510 @param EndAddress End address of promoted memory.
1511
1512 @return TRUE Succeeded to promote memory.
1513 @return FALSE No free memory found.
1514
1515 **/
1516 BOOLEAN
1517 PromoteGuardedFreePages (
1518 OUT EFI_PHYSICAL_ADDRESS *StartAddress,
1519 OUT EFI_PHYSICAL_ADDRESS *EndAddress
1520 )
1521 {
1522 EFI_STATUS Status;
1523 UINTN AvailablePages;
1524 UINT64 Bitmap;
1525 EFI_PHYSICAL_ADDRESS Start;
1526
1527 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1528 return FALSE;
1529 }
1530
1531 //
1532 // Similar to memory allocation service, always search the freed pages in
1533 // descending direction.
1534 //
1535 Start = mLastPromotedPage;
1536 AvailablePages = 0;
1537 while (AvailablePages == 0) {
1538 Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
1539 //
1540 // If the address wrapped around, restart the search from the top-most freed pages.
1541 //
1542 if (Start > mLastPromotedPage) {
1543 GetLastGuardedFreePageAddress (&Start);
1544 ASSERT (Start != 0);
1545 Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
1546 }
1547
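//
// Scan the 64-page window LSB first: 0 bits before the first 1 advance
// Start past pages not marked as freed; the following run of 1 bits is the
// range to promote. E.g. Bitmap == 0x3C gives Start += 2 pages and
// AvailablePages == 4.
//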
1548 Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
1549 while (Bitmap > 0) {
1550 if ((Bitmap & 1) != 0) {
1551 ++AvailablePages;
1552 } else if (AvailablePages == 0) {
1553 Start += EFI_PAGES_TO_SIZE (1);
1554 } else {
1555 break;
1556 }
1557
1558 Bitmap = RShiftU64 (Bitmap, 1);
1559 }
1560 }
1561
1562 if (AvailablePages) {
1563 DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
1564 ClearGuardedMemoryBits (Start, AvailablePages);
1565
1566 if (gCpu != NULL) {
1567 //
1568 // Set the flag to make sure memory for page table operations is allocated
1569 // without Guard; otherwise infinite loops could occur.
1570 //
1571 mOnGuarding = TRUE;
1572 Status = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE(AvailablePages), 0);
1573 ASSERT_EFI_ERROR (Status);
1574 mOnGuarding = FALSE;
1575 }
1576
1577 mLastPromotedPage = Start;
1578 *StartAddress = Start;
1579 *EndAddress = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
1580 return TRUE;
1581 }
1582
1583 return FALSE;
1584 }
1585
1586 /**
1587 Notify function used to set all Guard pages once the CPU Arch Protocol is installed.
1588 **/
1589 VOID
1590 HeapGuardCpuArchProtocolNotify (
1591 VOID
1592 )
1593 {
1594 ASSERT (gCpu != NULL);
1595
1596 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&
1597 IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1598 DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));
1599 CpuDeadLoop ();
1600 }
1601
1602 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {
1603 SetAllGuardPages ();
1604 }
1605
1606 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
1607 GuardAllFreedPages ();
1608 }
1609 }
1610
1611 /**
1612 Helper function to convert a UINT64 value into a binary string.
1613
1614 @param[in] Value Value of a UINT64 integer.
1615 @param[out] BinString String buffer to contain the conversion result.
1616
1617 @return VOID.
1618 **/
1619 VOID
1620 Uint64ToBinString (
1621 IN UINT64 Value,
1622 OUT CHAR8 *BinString
1623 )
1624 {
1625 UINTN Index;
1626
1627 if (BinString == NULL) {
1628 return;
1629 }
1630
1631 for (Index = 64; Index > 0; --Index) {
1632 BinString[Index - 1] = '0' + (Value & 1);
1633 Value = RShiftU64 (Value, 1);
1634 }
1635 BinString[64] = '\0';
1636 }
1637
1638 /**
1639 Dump the guarded memory bit map.
1640 **/
1641 VOID
1642 EFIAPI
1643 DumpGuardedMemoryBitmap (
1644 VOID
1645 )
1646 {
1647 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1648 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1649 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1650 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1651 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1652 UINT64 TableEntry;
1653 UINT64 Address;
1654 INTN Level;
1655 UINTN RepeatZero;
1656 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
1657 CHAR8 *Ruler1;
1658 CHAR8 *Ruler2;
1659
1660 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {
1661 return;
1662 }
1663
1664 if (mGuardedMemoryMap == 0 ||
1665 mMapLevel == 0 ||
1666 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1667 return;
1668 }
1669
1670 Ruler1 = " 3 2 1 0";
1671 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
1672
1673 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
1674 " Guarded Memory Bitmap "
1675 "==============================\r\n"));
1676 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
1677 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
1678
1679 CopyMem (Entries, mLevelMask, sizeof (Entries));
1680 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1681
1682 SetMem (Indices, sizeof(Indices), 0);
1683 SetMem (Tables, sizeof(Tables), 0);
1684 SetMem (Addresses, sizeof(Addresses), 0);
1685
1686 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1687 Tables[Level] = mGuardedMemoryMap;
1688 Address = 0;
1689 RepeatZero = 0;
1690
1691 while (TRUE) {
1692 if (Indices[Level] > Entries[Level]) {
1693
1694 Tables[Level] = 0;
1695 Level -= 1;
1696 RepeatZero = 0;
1697
1698 DEBUG ((
1699 HEAP_GUARD_DEBUG_LEVEL,
1700 "========================================="
1701 "=========================================\r\n"
1702 ));
1703
1704 } else {
1705
1706 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
1707 Address = Addresses[Level];
1708
1709 if (TableEntry == 0) {
1710
1711 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1712 if (RepeatZero == 0) {
1713 Uint64ToBinString(TableEntry, String);
1714 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1715 } else if (RepeatZero == 1) {
1716 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
1717 }
1718 RepeatZero += 1;
1719 }
1720
1721 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1722
1723 Level += 1;
1724 Tables[Level] = TableEntry;
1725 Addresses[Level] = Address;
1726 Indices[Level] = 0;
1727 RepeatZero = 0;
1728
1729 continue;
1730
1731 } else {
1732
1733 RepeatZero = 0;
1734 Uint64ToBinString(TableEntry, String);
1735 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1736
1737 }
1738 }
1739
1740 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1741 break;
1742 }
1743
1744 Indices[Level] += 1;
1745 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1746 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1747
1748 }
1749 }
1750