/** @file
  UEFI Heap Guard functions.

Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "DxeMain.h"
#include "Imem.h"
#include "HeapGuard.h"

//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need to allocate pages for new PDE/PTE.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN  mOnGuarding = FALSE;

//
// Pointer to the table tracking guarded memory with a bitmap, in which '1'
// indicates guarded memory. '0' might be free memory or a Guard page
// itself, depending on the status of the memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64  mGuardedMemoryMap = 0;

//
// Current depth level of the map table pointed to by mGuardedMemoryMap.
// mMapLevel must be initialized to at least 1. It will be updated
// automatically according to the address of the memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN  mMapLevel = 1;

//
// Shift and mask for each level of the map table
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN  mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
  = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN  mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
  = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;

//
// Used for promoting freed but not used pages.
//
GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS  mLastPromotedPage = BASE_4GB;

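//
// Illustrative note (not in the original source): the globals above form a
// page-table-like tree. Each leaf entry is a UINT64 whose bits track 64
// consecutive 4KB pages; mLevelShift/mLevelMask slice an address into
// per-level indices. A hedged sketch of the lookup math, assuming the
// GUARDED_HEAP_MAP_* macros from HeapGuard.h behave as they are used below:
//
//   EFI_PHYSICAL_ADDRESS  Addr  = 0x12345000;  // hypothetical page address
//   UINTN                 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - 1;
//   UINTN                 Index;
//
//   Index = (UINTN)RShiftU64 (Addr, mLevelShift[Level]) & mLevelMask[Level];
//   // Index selects one UINT64 entry at the leaf level; the bit within
//   // that entry is GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Addr).
//
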
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN  Lsbs;
  UINTN  Qwords;
  UINTN  Msbs;
  UINTN  StartBit;
  UINTN  EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
           GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs   = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs   = BitNumber;
    Lsbs   = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 (
      (VOID *)BitMap,
      Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
      (UINT64)-1
      );
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}

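//
// Worked example (illustrative; the address and values are assumptions, not
// from this file, and assume GUARDED_HEAP_MAP_ENTRY_BIT_INDEX() yields the
// page index modulo 64, as it is used here): setting 70 bits starting at
// bit 60 splits into Msbs = 4 bits completing the first UINT64, Qwords = 1
// full UINT64 filled by SetMem64(), and Lsbs = 2 bits in the last UINT64:
//
//   UINT64  Map[3] = { 0, 0, 0 };
//
//   SetBits (EFI_PAGES_TO_SIZE (60), 70, Map);
//   // Map[0] == 0xF000000000000000ULL
//   // Map[1] == 0xFFFFFFFFFFFFFFFFULL
//   // Map[2] == 0x0000000000000003ULL
//
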
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address    Start address to clear for.
  @param[in]  BitNumber  Number of bits to clear.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN  Lsbs;
  UINTN  Qwords;
  UINTN  Msbs;
  UINTN  StartBit;
  UINTN  EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
           GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs   = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs   = BitNumber;
    Lsbs   = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}

/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address    Start address to retrieve bits for.
  @param[in]  BitNumber  Number of bits to get.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN   StartBit;
  UINTN   EndBit;
  UINTN   Lsbs;
  UINTN   Msbs;
  UINT64  Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  if ((StartBit == 0) && (BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS)) {
    Result = *BitMap;
  } else {
    Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
    if (Lsbs > 0) {
      BitMap += 1;
      Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}

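//
// Read-back sketch (illustrative; values are assumptions): bit 0 of the
// result corresponds to the page at the given address. With Map[0] holding
// bits 60..63 set and Map[1] holding bits 0..1 set:
//
//   UINT64  Map[2] = { 0xF000000000000000ULL, 0x0000000000000003ULL };
//   UINT64  Bits;
//
//   Bits = GetBits (EFI_PAGES_TO_SIZE (60), 6, Map);
//   // Bits == 0x3F: 4 bits from Map[0] plus 2 bits from Map[1].
//
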
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  @param[in]   Address       Start address to search the bitmap for.
  @param[in]   AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out]  BitMap        Pointer to bitmap which covers the Address.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               AllocMapUnit,
  OUT UINT64                **BitMap
  )
{
  UINTN       Level;
  UINT64      *GuardMap;
  UINT64      MapMemory;
  UINTN       Index;
  UINTN       Size;
  UINTN       BitsToUnitEnd;
  EFI_STATUS  Status;

  MapMemory = 0;

  //
  // Adjust current map table depth according to the address to access
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0)
  {
    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap           = MapMemory;
    }

    mMapLevel++;
  }

  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level)
  {
    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size   = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index    &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}

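//
// Usage sketch (illustrative): one call only reaches the end of the current
// map unit, so callers loop until all requested pages are covered, as
// SetGuardedMemoryBits() below does.
//
//   UINT64  *BitMap;
//   UINTN   BitsToUnitEnd;
//
//   BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
//   if (BitMap == NULL) {
//     // No map unit covers Address (and allocation was not requested).
//   }
//
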
/**
  Set corresponding bits in bitmap table to 1 according to given memory range.

  @param[in]  Address        Memory address to guard from.
  @param[in]  NumberOfPages  Number of pages to guard.

  @return VOID.
**/
VOID
EFIAPI
SetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINTN   BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    SetBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Clear corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to unset from.
  @param[in]  NumberOfPages  Number of pages to unset guard.

  @return VOID.
**/
VOID
EFIAPI
ClearGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINTN   BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    ClearBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to retrieve from.
  @param[in]  NumberOfPages  Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINT64
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINT64  Result;
  UINTN   Shift;
  UINTN   BitsToUnitEnd;

  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift  = 0;
  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    if (BitMap != NULL) {
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift         += Bits;
    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}

/**
  Get bit value in bitmap table for the given address.

  @param[in]  Address  The address to retrieve for.

  @return 1 or 0.
**/
UINTN
EFIAPI
GetGuardMapBit (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  UINT64  *GuardMap;

  FindGuardedMemoryMap (Address, FALSE, &GuardMap);
  if (GuardMap != NULL) {
    if (RShiftU64 (
          *GuardMap,
          GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)
          ) & 1)
    {
      return 1;
    }
  }

  return 0;
}

/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]  Address  The address to check for.

  @return TRUE   The page at Address is a Guard page.
  @return FALSE  The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  UINT64  BitMap;

  //
  // There must be at least one guarded page before and/or after given
  // address if it's a Guard page. The bitmap pattern should be one of
  // 001, 100 and 101.
  //
  BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
  return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
}

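//
// Pattern sketch (illustrative): the three bits fetched cover pages
// {P - 1, P, P + 1}, with bit 0 being page P - 1. A Guard page is itself
// unguarded ('0') but has at least one guarded neighbor:
//
//   binary 001 (BIT0)         - P is the tail Guard of the block before it
//   binary 100 (BIT2)         - P is the head Guard of the block after it
//   binary 101 (BIT2 | BIT0)  - P is a Guard shared by two adjacent blocks
//
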
/**
  Check to see if the page at the given address is guarded or not.

  @param[in]  Address  The address to check for.

  @return TRUE   The page at Address is guarded.
  @return FALSE  The page at Address is not guarded.
**/
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  return (GetGuardMapBit (Address) == 1);
}

/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT.

  @param[in]  BaseAddress  Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
SetGuardPage (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress
  )
{
  EFI_STATUS  Status;

  if (gCpu == NULL) {
    return;
  }

  //
  // Set the flag to make sure that memory used for page table operations is
  // allocated without a Guard; otherwise an infinite loop could occur.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as NX memory protection.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}

/**
  Unset the Guard page at the given address back to normal memory.

  This is done by changing the page table attribute to be PRESENT.

  @param[in]  BaseAddress  Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress
  )
{
  UINT64      Attributes;
  EFI_STATUS  Status;

  if (gCpu == NULL) {
    return;
  }

  //
  // Once the Guard page is unset, it will be freed back to the memory pool.
  // NX memory protection must be restored for this page if NX is enabled
  // for free memory.
  //
  Attributes = 0;
  if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
    Attributes |= EFI_MEMORY_XP;
  }

  //
  // Set the flag to make sure that memory used for page table operations is
  // allocated without a Guard; otherwise an infinite loop could occur.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as memory protection (NX). Please make sure they are not enabled
  // at the same time.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}

/**
  Check to see if the given type of memory should be guarded or not.

  @param[in]  MemoryType    Memory type to check.
  @param[in]  AllocateType  Allocation type to check.
  @param[in]  PageOrPool    Indicate a page allocation or pool allocation.

  @return TRUE   The given type of memory should be guarded.
  @return FALSE  The given type of memory should not be guarded.
**/
BOOLEAN
IsMemoryTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType,
  IN UINT8              PageOrPool
  )
{
  UINT64  TestBit;
  UINT64  ConfigBit;

  if (AllocateType == AllocateAddress) {
    return FALSE;
  }

  if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
    return FALSE;
  }

  if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
    ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
  } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
    ConfigBit = PcdGet64 (PcdHeapGuardPageType);
  } else {
    ConfigBit = (UINT64)-1;
  }

  if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
    TestBit = BIT63;
  } else if ((UINT32)MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
    TestBit = BIT62;
  } else if (MemoryType < EfiMaxMemoryType) {
    TestBit = LShiftU64 (1, MemoryType);
  } else if (MemoryType == EfiMaxMemoryType) {
    TestBit = (UINT64)-1;
  } else {
    TestBit = 0;
  }

  return ((ConfigBit & TestBit) != 0);
}

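//
// Configuration sketch (illustrative; the PCD values are assumptions, not
// defaults): assuming PcdHeapGuardPropertyMask has the GUARD_HEAP_TYPE_PAGE
// bit set and PcdHeapGuardPageType = BIT4 (EfiBootServicesData == 4 in the
// EFI_MEMORY_TYPE enum):
//
//   IsMemoryTypeToGuard (EfiBootServicesData, AllocateAnyPages, GUARD_HEAP_TYPE_PAGE);
//   // -> TRUE  (TestBit = BIT4 matches ConfigBit)
//   IsMemoryTypeToGuard (EfiRuntimeServicesData, AllocateAnyPages, GUARD_HEAP_TYPE_PAGE);
//   // -> FALSE (TestBit = BIT6 is not in ConfigBit)
//   IsMemoryTypeToGuard (EfiBootServicesData, AllocateAddress, GUARD_HEAP_TYPE_PAGE);
//   // -> FALSE (AllocateAddress requests are never guarded)
//
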
/**
  Check to see if the given type of pool should be guarded or not.

  @param[in]  MemoryType  Pool type to check.

  @return TRUE   The given type of pool should be guarded.
  @return FALSE  The given type of pool should not be guarded.
**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE  MemoryType
  )
{
  return IsMemoryTypeToGuard (
           MemoryType,
           AllocateAnyPages,
           GUARD_HEAP_TYPE_POOL
           );
}

/**
  Check to see if the given type of page should be guarded or not.

  @param[in]  MemoryType    Page type to check.
  @param[in]  AllocateType  Allocation type to check.

  @return TRUE   The given type of page should be guarded.
  @return FALSE  The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType
  )
{
  return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
}

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @param[in]  GuardType  Specify the sub-type(s) of Heap Guard.

  @return TRUE/FALSE.
**/
BOOLEAN
IsHeapGuardEnabled (
  UINT8  GuardType
  )
{
  return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);
}

/**
  Set head Guard and tail Guard for the given memory range.

  @param[in]  Memory         Base address of memory to set guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;

  //
  // Set tail Guard
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  // Set head Guard
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  //
  // Mark the memory range as Guarded
  //
  SetGuardedMemoryBits (Memory, NumberOfPages);
}

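//
// Layout sketch (illustrative; the addresses are assumptions): after
// SetGuardForMemory (0x41000, 2), the physical layout is:
//
//   0x40000  head Guard  (not-present; shared if it was already a Guard)
//   0x41000  data page   (bitmap bit = 1)
//   0x42000  data page   (bitmap bit = 1)
//   0x43000  tail Guard  (not-present; shared if it was already a Guard)
//
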
/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory         Base address of memory to unset guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard too    (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage   = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of an adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before the memory to free are still guarded. It's a partial free
    // case. Turn the first page of the memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard too    (not shared Guard)
  //         X     1 -> Don't free last page  (need a new Guard)
  //                    (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage   = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of an adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after the memory to free are still guarded. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits (Memory, NumberOfPages);
}

/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there are existing Guard pages of adjacent
  memory blocks, and try to use them as the Guard pages of the memory to be
  allocated.

  @param[in]  Start          Start address of free memory block.
  @param[in]  Size           Size of free memory block.
  @param[in]  SizeRequested  Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if there is not enough space for the required size of memory and
          its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64  Start,
  IN UINT64  Size,
  IN UINT64  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE (SizeRequested, 8);
  }

  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // Not enough space for a new head Guard if there is no Guard at head
      // to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of
  // the free space.
  return Target + SizeRequested - 1;
}

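//
// Worked example (illustrative; the addresses are assumptions): a free
// block at Start = 0x40000 with Size = 0x5000 (5 pages), SizeRequested =
// 0x2000 (2 pages), and no Guard pages nearby:
//
//   Target = 0x40000 + 0x5000 - 0x2000 = 0x43000
//   No Guard at 0x45000 to share -> Target = 0x42000
//   Return 0x42000 + 0x2000 - 1 = 0x43FFF
//
// Pages 0x42000-0x43FFF are allocated; 0x41000 and 0x44000 are left free
// for the head and tail Guards.
//
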
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with an
  adjacent memory block if it is still guarding memory, or to free it if it
  is no longer shared. Another purpose is to reserve pages as Guard pages
  when only part of an allocation is freed.

  @param[in,out]  Memory         Base address of memory to free.
  @param[in,out]  NumberOfPages  Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if ((Memory == NULL) || (NumberOfPages == NULL) || (*NumberOfPages == 0)) {
    return;
  }

  Start       = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard too    (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of an adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No head Guard, and pages before the memory to free are still guarded.
    // It's a partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard too    (not shared Guard)
  //         X     1 -> Don't free last page  (need a new Guard)
  //                    (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of an adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No tail Guard, and pages after the memory to free are still guarded.
    // It's a partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}

/**
  Adjust the base and number of pages to really allocate according to Guard.

  @param[in,out]  Memory         Base address of free memory.
  @param[in,out]  NumberOfPages  Size of memory to allocate.

  @return VOID.
**/
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  )
{
  //
  // FindFreePages() has already taken the Guard into account. It's safe to
  // adjust the start address and/or number of pages here, to make sure that
  // the Guards are also "allocated".
  //
  if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
    // No tail Guard, add one.
    *NumberOfPages += 1;
  }

  if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
    // No head Guard, add one.
    *Memory        -= EFI_PAGE_SIZE;
    *NumberOfPages += 1;
  }
}

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  pool tail or pool head.

  @param[in]  Memory   Base address of memory allocated.
  @param[in]  NoPages  Number of pages actually allocated.
  @param[in]  Size     Size of memory requested.
                       (plus pool head/tail overhead)

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  )
{
  if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  Size = ALIGN_VALUE (Size, 8);
  return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
}

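//
// Worked example (illustrative; the values are assumptions): with BIT7 of
// PcdHeapGuardPropertyMask clear (pool head placed near the tail Guard), a
// pool of Size = 0x30 bytes carved from NoPages = 1 page at Memory = 0x41000:
//
//   Size aligned to 8 -> 0x30
//   Return 0x41000 + 0x1000 - 0x30 = 0x41FD0
//
// Any overflow past the requested size then hits the not-present tail Guard
// at 0x42000 immediately.
//
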
/**
  Get the page base address according to pool head address.

  @param[in]  Memory  Head address of pool to free.

  @return Base address of the page(s) the pool head was allocated from.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS  Memory
  )
{
  if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
}

/**
  Allocate or free guarded memory.

  @param[in]  Start          Start address of memory to allocate or free.
  @param[in]  NumberOfPages  Memory size in pages.
  @param[in]  NewType        Memory type to convert to.

  @return EFI_SUCCESS or the status returned by CoreConvertPages().
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset the Guard page inside the memory lock because no
    // memory allocation should occur while the page attributes are being
    // updated at this point. And unsetting the Guard page before the free
    // prevents a Guard page just freed back to the pool from being
    // allocated right away, before it is marked usable again (from
    // non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      return EFI_SUCCESS;
    }
  } else {
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}

/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN    Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN    Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN    Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   TableEntry;
  UINT64   Address;
  UINT64   GuardPage;
  INTN     Level;
  UINTN    Index;
  BOOLEAN  OnGuarding;

  if ((mGuardedMemoryMap == 0) ||
      (mMapLevel == 0) ||
      (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
  {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
    );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        OnGuarding = FALSE;
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;
      } else {
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              GuardPage = Address - EFI_PAGE_SIZE;
            }

            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }

            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address   += EFI_PAGE_SIZE;
          Index     += 1;
        }
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}

/**
  Find the address of top-most guarded free page.

  @param[out]  Address  Start address of top-most guarded free page.

  @return VOID.
**/
VOID
GetLastGuardedFreePageAddress (
  OUT EFI_PHYSICAL_ADDRESS  *Address
  )
{
  EFI_PHYSICAL_ADDRESS  AddressGranularity;
  EFI_PHYSICAL_ADDRESS  BaseAddress;
  UINTN                 Level;
  UINT64                Map;
  INTN                  Index;

  ASSERT (mMapLevel >= 1);

  BaseAddress = 0;
  Map         = mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level)
  {
    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);

    //
    // Find the non-NULL entry at largest index.
    //
    for (Index = (INTN)mLevelMask[Level]; Index >= 0; --Index) {
      if (((UINT64 *)(UINTN)Map)[Index] != 0) {
        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
        Map          = ((UINT64 *)(UINTN)Map)[Index];
        break;
      }
    }
  }

  //
  // Find the non-zero MSB then get the page address.
  //
  while (Map != 0) {
    Map          = RShiftU64 (Map, 1);
    BaseAddress += EFI_PAGES_TO_SIZE (1);
  }

  *Address = BaseAddress;
}

/**
  Record freed pages.

  @param[in]  BaseAddress  Base address of just freed pages.
  @param[in]  Pages        Number of freed pages.

  @return VOID.
**/
VOID
MarkFreedPages (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Pages
  )
{
  SetGuardedMemoryBits (BaseAddress, Pages);
}

/**
  Record freed pages as well as mark them as not-present.

  @param[in]  BaseAddress  Base address of just freed pages.
  @param[in]  Pages        Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPages (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Pages
  )
{
  EFI_STATUS  Status;

  //
  // Legacy memory lower than 1MB might be accessed with no allocation. Leave
  // it alone.
  //
  if (BaseAddress < BASE_1MB) {
    return;
  }

  MarkFreedPages (BaseAddress, Pages);
  if (gCpu != NULL) {
    //
    // Set the flag to make sure that memory used for page table operations
    // is allocated without a Guard; otherwise an infinite loop could occur.
    //
    mOnGuarding = TRUE;
    //
    // Note: This might overwrite other attributes needed by other features,
    // such as NX memory protection.
    //
    Status = gCpu->SetMemoryAttributes (
                     gCpu,
                     BaseAddress,
                     EFI_PAGES_TO_SIZE (Pages),
                     EFI_MEMORY_RP
                     );
    //
    // Normally we should ASSERT the returned Status. But there might be
    // memory alloc/free involved in SetMemoryAttributes(), which might cause
    // this call to fail. It is a rare case, so it is OK to leave a few tiny
    // holes unguarded.
    //
    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));
    }

    mOnGuarding = FALSE;
  }
}

/**
  Record freed pages as well as mark them as not-present, if enabled.

  @param[in]  BaseAddress  Base address of just freed pages.
  @param[in]  Pages        Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPagesChecked (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Pages
  )
{
  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    GuardFreedPages (BaseAddress, Pages);
  }
}

/**
  Mark all pages freed before CPU Arch Protocol as not-present.

**/
VOID
GuardAllFreedPages (
  VOID
  )
{
  UINTN   Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  TableEntry;
  UINT64  Address;
  UINT64  GuardPage;
  INTN    Level;
  UINT64  BitIndex;
  UINTN   GuardPageNumber;

  if ((mGuardedMemoryMap == 0) ||
      (mMapLevel == 0) ||
      (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
  {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  Level           = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level]   = mGuardedMemoryMap;
  Address         = 0;
  GuardPage       = (UINT64)-1;
  GuardPageNumber = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;
      } else {
        BitIndex = 1;
        while (BitIndex != 0) {
          if ((TableEntry & BitIndex) != 0) {
            if (GuardPage == (UINT64)-1) {
              GuardPage = Address;
            }

            ++GuardPageNumber;
          } else if (GuardPageNumber > 0) {
            GuardFreedPages (GuardPage, GuardPageNumber);
            GuardPageNumber = 0;
            GuardPage       = (UINT64)-1;
          }

          if (TableEntry == 0) {
            break;
          }

          Address  += EFI_PAGES_TO_SIZE (1);
          BitIndex  = LShiftU64 (BitIndex, 1);
        }
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }

  //
  // Update the maximum address of freed page which can be used for memory
  // promotion upon out-of-memory-space.
  //
  GetLastGuardedFreePageAddress (&Address);
  if (Address != 0) {
    mLastPromotedPage = Address;
  }
}

/**
  This function checks to see if the given memory map descriptor in a memory
  map can be merged with any guarded free pages.

  @param  MemoryMapEntry  A pointer to a descriptor in MemoryMap.
  @param  MaxAddress      Maximum address to stop the merge.

  @return VOID.

**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR  *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS   MaxAddress
  )
{
  EFI_PHYSICAL_ADDRESS  EndAddress;
  UINT64                Bitmap;
  INTN                  Pages;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
      (MemoryMapEntry->Type >= EfiMemoryMappedIO))
  {
    return;
  }

  Bitmap = 0;
  Pages  = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));
  Pages -= (INTN)MemoryMapEntry->NumberOfPages;
  while (Pages > 0) {
    if (Bitmap == 0) {
      EndAddress = MemoryMapEntry->PhysicalStart +
                   EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);
      Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    if ((Bitmap & 1) == 0) {
      break;
    }

    Pages--;
    MemoryMapEntry->NumberOfPages++;
    Bitmap = RShiftU64 (Bitmap, 1);
  }
}

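//
// Merge sketch (illustrative; the addresses are assumptions): if a
// descriptor ends at 0x42000 and the pages at 0x42000 and 0x43000 were
// freed under the freed-memory guard (bitmap bits '1'), the loop above
// grows MemoryMapEntry->NumberOfPages by 2 so the OS sees one contiguous
// range instead of fragments split by guarded freed pages.
//
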
/**
  Put part of the guarded free pages (at most 64 pages at a time) back into
  the free page pool.

  The freed-memory guard is used to detect Use-After-Free (UAF) memory
  issues: freed memory is "used then thrown away", i.e. marked as
  not-present, so that any access to it after free is caught by a page-fault
  exception.

  The problem is that this consumes a lot of memory space. Once no memory is
  left in the pool to allocate, we have to restore part of the freed pages
  to their normal function. Otherwise the whole system will stop functioning.

  @param  StartAddress  Start address of promoted memory.
  @param  EndAddress    End address of promoted memory.

  @return TRUE   Succeeded in promoting memory.
  @return FALSE  No free memory found.

**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS  *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS  *EndAddress
  )
{
  EFI_STATUS            Status;
  UINTN                 AvailablePages;
  UINT64                Bitmap;
  EFI_PHYSICAL_ADDRESS  Start;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    return FALSE;
  }

  //
  // Similar to the memory allocation service, always search the freed pages
  // in the descending direction.
  //
  Start          = mLastPromotedPage;
  AvailablePages = 0;
  while (AvailablePages == 0) {
    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    //
    // If the address wraps around, try the really freed pages at the top.
    //
    if (Start > mLastPromotedPage) {
      GetLastGuardedFreePageAddress (&Start);
      ASSERT (Start != 0);
      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
    while (Bitmap > 0) {
      if ((Bitmap & 1) != 0) {
        ++AvailablePages;
      } else if (AvailablePages == 0) {
        Start += EFI_PAGES_TO_SIZE (1);
      } else {
        break;
      }

      Bitmap = RShiftU64 (Bitmap, 1);
    }
  }

  if (AvailablePages != 0) {
    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
    ClearGuardedMemoryBits (Start, AvailablePages);

    if (gCpu != NULL) {
      //
      // Set the flag to make sure that memory used for page table operations
      // is allocated without a Guard; otherwise an infinite loop could occur.
      //
      mOnGuarding = TRUE;
      Status      = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE (AvailablePages), 0);
      ASSERT_EFI_ERROR (Status);
      mOnGuarding = FALSE;
    }

    mLastPromotedPage = Start;
    *StartAddress     = Start;
    *EndAddress       = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
    return TRUE;
  }

  return FALSE;
}

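//
// Usage sketch (illustrative): the page allocator can call this when it
// runs out of free pages; the returned range has been unguarded and can be
// converted back to usable memory.
//
//   EFI_PHYSICAL_ADDRESS  StartAddress;
//   EFI_PHYSICAL_ADDRESS  EndAddress;
//
//   if (PromoteGuardedFreePages (&StartAddress, &EndAddress)) {
//     // Convert [StartAddress, EndAddress] back to EfiConventionalMemory.
//   }
//
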
/**
  Notify function used to set all Guard pages before CPU Arch Protocol installed.
**/
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  )
{
  ASSERT (gCpu != NULL);

  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&
      IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED))
  {
    DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));
    CpuDeadLoop ();
  }

  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {
    SetAllGuardPages ();
  }

  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    GuardAllFreedPages ();
  }
}

/**
  Helper function to convert a UINT64 value in binary to a string.

  @param[in]   Value      Value of a UINT64 integer.
  @param[out]  BinString  String buffer to contain the conversion result.

  @return VOID.
**/
VOID
Uint64ToBinString (
  IN  UINT64  Value,
  OUT CHAR8   *BinString
  )
{
  UINTN  Index;

  if (BinString == NULL) {
    return;
  }

  for (Index = 64; Index > 0; --Index) {
    BinString[Index - 1] = '0' + (Value & 1);
    Value                = RShiftU64 (Value, 1);
  }

  BinString[64] = '\0';
}

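//
// Example (illustrative): the buffer must hold at least 65 characters
// (64 digits plus the terminating '\0').
//
//   CHAR8  String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
//
//   Uint64ToBinString (0x5, String);
//   // String == "00...0101" (61 zeros followed by "101")
//
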
/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN   Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  TableEntry;
  UINT64  Address;
  INTN    Level;
  UINTN   RepeatZero;
  CHAR8   String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8   *Ruler1;
  CHAR8   *Ruler2;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {
    return;
  }

  if ((mGuardedMemoryMap == 0) ||
      (mMapLevel == 0) ||
      (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
  {
    return;
  }

  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((
    HEAP_GUARD_DEBUG_LEVEL,
    "============================="
    " Guarded Memory Bitmap "
    "==============================\r\n"
    ));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof (Indices), 0);
  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);

  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));
    } else {
      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          if (RepeatZero == 0) {
            Uint64ToBinString (TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }

          RepeatZero += 1;
        }
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;
        RepeatZero        = 0;

        continue;
      } else {
        RepeatZero = 0;
        Uint64ToBinString (TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}