MdeModulePkg/Core/PiSmmCore/HeapGuard.c
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "HeapGuard.h"
16
17 //
18 // Global used to avoid unbounded recursion of memory allocation when updating
19 // page table attributes, which may itself require allocating pages for new PDE/PTE entries.
20 //
21 GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
22
23 //
24 // Pointer to the table tracking guarded memory with a bitmap, in which '1'
25 // indicates guarded memory. '0' might be free memory or a Guard page
26 // itself, depending on the status of the memory adjacent to it.
27 //
28 GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
29
30 //
31 // Current depth level of the map table pointed to by mGuardedMemoryMap.
32 // mMapLevel must be initialized to at least 1. It will be updated
33 // automatically according to the address of the memory just tracked.
34 //
35 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
36
37 //
38 // Shift and mask for each level of map table
39 //
40 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
41 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
42 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
44
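//
// Note: the map works like a multi-level page table. An address is broken
// into one index per level by shifting with mLevelShift[Level] and masking
// with mLevelMask[Level]; the last level selects a UINT64 bitmap entry whose
// bits mark individual guarded pages. The actual depth, shifts and masks are
// defined by the GUARDED_HEAP_MAP_TABLE_DEPTH_* macros in HeapGuard.h.
//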
45 //
46 // SMM memory attribute protocol
47 //
48 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *mSmmMemoryAttribute = NULL;
49
50 /**
51 Set corresponding bits in bitmap table to 1 according to the address.
52
53 @param[in] Address Start address to set for.
54 @param[in] BitNumber Number of bits to set.
55 @param[in] BitMap Pointer to bitmap which covers the Address.
56
57 @return VOID
58 **/
59 STATIC
60 VOID
61 SetBits (
62 IN EFI_PHYSICAL_ADDRESS Address,
63 IN UINTN BitNumber,
64 IN UINT64 *BitMap
65 )
66 {
67 UINTN Lsbs;
68 UINTN Qwords;
69 UINTN Msbs;
70 UINTN StartBit;
71 UINTN EndBit;
72
73 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
74 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
75
76 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
77 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
78 GUARDED_HEAP_MAP_ENTRY_BITS;
79 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
80 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
81 } else {
82 Msbs = BitNumber;
83 Lsbs = 0;
84 Qwords = 0;
85 }
86
87 if (Msbs > 0) {
88 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
89 BitMap += 1;
90 }
91
92 if (Qwords > 0) {
93 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
94 (UINT64)-1);
95 BitMap += Qwords;
96 }
97
98 if (Lsbs > 0) {
99 *BitMap |= (LShiftU64 (1, Lsbs) - 1);
100 }
101 }
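//
// Worked example for SetBits() (illustrative values): with StartBit == 60 and
// BitNumber == 70, the range crosses entry boundaries, so Msbs == 4 (bits
// 60..63 of the first UINT64), Qwords == 1 (one full UINT64 filled via
// SetMem64()) and Lsbs == 2 (bits 0..1 of the following UINT64).
//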
102
103 /**
104 Set corresponding bits in bitmap table to 0 according to the address.
105
106 @param[in] Address Start address to set for.
107 @param[in] BitNumber Number of bits to set.
108 @param[in] BitMap Pointer to bitmap which covers the Address.
109
110 @return VOID.
111 **/
112 STATIC
113 VOID
114 ClearBits (
115 IN EFI_PHYSICAL_ADDRESS Address,
116 IN UINTN BitNumber,
117 IN UINT64 *BitMap
118 )
119 {
120 UINTN Lsbs;
121 UINTN Qwords;
122 UINTN Msbs;
123 UINTN StartBit;
124 UINTN EndBit;
125
126 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
127 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
128
129 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
130 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
131 GUARDED_HEAP_MAP_ENTRY_BITS;
132 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
133 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
134 } else {
135 Msbs = BitNumber;
136 Lsbs = 0;
137 Qwords = 0;
138 }
139
140 if (Msbs > 0) {
141 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
142 BitMap += 1;
143 }
144
145 if (Qwords > 0) {
146 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
147 BitMap += Qwords;
148 }
149
150 if (Lsbs > 0) {
151 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
152 }
153 }
154
155 /**
156 Get corresponding bits in bitmap table according to the address.
157
158 The value of bit 0 corresponds to the status of memory at given Address.
159 No more than 64 bits can be retrieved in one call.
160
161 @param[in] Address Start address to retrieve bits for.
162 @param[in] BitNumber Number of bits to get.
163 @param[in] BitMap Pointer to bitmap which covers the Address.
164
165 @return An integer containing the bits information.
166 **/
167 STATIC
168 UINT64
169 GetBits (
170 IN EFI_PHYSICAL_ADDRESS Address,
171 IN UINTN BitNumber,
172 IN UINT64 *BitMap
173 )
174 {
175 UINTN StartBit;
176 UINTN EndBit;
177 UINTN Lsbs;
178 UINTN Msbs;
179 UINT64 Result;
180
181 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
182
183 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
184 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
185
186 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
187 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
188 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
189 } else {
190 Msbs = BitNumber;
191 Lsbs = 0;
192 }
193
194 Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
195 if (Lsbs > 0) {
196 BitMap += 1;
197 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
198 }
199
200 return Result;
201 }
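//
// Worked example for GetBits() (illustrative values): with StartBit == 62 and
// BitNumber == 4, Msbs == 2 and Lsbs == 2, so the result combines bits 62..63
// of the current UINT64 with bits 0..1 of the next one, shifted into place.
//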
202
203 /**
204 Helper function to allocate pages without Guard for internal uses.
205
206 @param[in] Pages Page number.
207
208 @return Address of memory allocated.
209 **/
210 VOID *
211 PageAlloc (
212 IN UINTN Pages
213 )
214 {
215 EFI_STATUS Status;
216 EFI_PHYSICAL_ADDRESS Memory;
217
218 Status = SmmInternalAllocatePages (AllocateAnyPages, EfiRuntimeServicesData,
219 Pages, &Memory, FALSE);
220 if (EFI_ERROR (Status)) {
221 Memory = 0;
222 }
223
224 return (VOID *)(UINTN)Memory;
225 }
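//
// Note: the trailing FALSE argument of SmmInternalAllocatePages() above asks
// for an unguarded allocation (assumed to be the NeedGuard flag declared in
// PiSmmCore's internal headers), so pages used for the bitmap tables are never
// themselves guarded and cannot recurse into Guard handling.
//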
226
227 /**
228 Locate, in the guarded memory bitmap tables, the pointer to the bitmap which
229 covers the given Address.
230
231 @param[in] Address Start address to search the bitmap for.
232 @param[in] AllocMapUnit Flag to indicate memory allocation for the table.
233 @param[out] BitMap Pointer to bitmap which covers the Address.
234
235 @return The number of bits from the given Address to the end of the current map table.
236 **/
237 UINTN
238 FindGuardedMemoryMap (
239 IN EFI_PHYSICAL_ADDRESS Address,
240 IN BOOLEAN AllocMapUnit,
241 OUT UINT64 **BitMap
242 )
243 {
244 UINTN Level;
245 UINT64 *GuardMap;
246 UINT64 MapMemory;
247 UINTN Index;
248 UINTN Size;
249 UINTN BitsToUnitEnd;
250
251 //
252 // Adjust current map table depth according to the address to access
253 //
254 while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH
255 &&
256 RShiftU64 (
257 Address,
258 mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
259 ) != 0) {
260
261 if (mGuardedMemoryMap != 0) {
262 Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
263 * GUARDED_HEAP_MAP_ENTRY_BYTES;
264 MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
265 ASSERT (MapMemory != 0);
266
267 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
268
269 *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
270 mGuardedMemoryMap = MapMemory;
271 }
272
273 mMapLevel++;
274
275 }
276
277 GuardMap = &mGuardedMemoryMap;
278 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
279 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
280 ++Level) {
281
282 if (*GuardMap == 0) {
283 if (!AllocMapUnit) {
284 GuardMap = NULL;
285 break;
286 }
287
288 Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
289 MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
290 ASSERT (MapMemory != 0);
291
292 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
293 *GuardMap = MapMemory;
294 }
295
296 Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
297 Index &= mLevelMask[Level];
298 GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
299
300 }
301
302 BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
303 *BitMap = GuardMap;
304
305 return BitsToUnitEnd;
306 }
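//
// Note: the walk above allocates missing intermediate tables on demand when
// AllocMapUnit is TRUE. The returned BitsToUnitEnd tells the caller how many
// page bits remain from Address to the end of the bitmap unit just located,
// so multi-unit ranges are processed unit by unit (see SetGuardedMemoryBits()
// and ClearGuardedMemoryBits() below).
//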
307
308 /**
309 Set corresponding bits in bitmap table to 1 according to given memory range.
310
311 @param[in] Address Memory address to guard from.
312 @param[in] NumberOfPages Number of pages to guard.
313
314 @return VOID
315 **/
316 VOID
317 EFIAPI
318 SetGuardedMemoryBits (
319 IN EFI_PHYSICAL_ADDRESS Address,
320 IN UINTN NumberOfPages
321 )
322 {
323 UINT64 *BitMap;
324 UINTN Bits;
325 UINTN BitsToUnitEnd;
326
327 while (NumberOfPages > 0) {
328 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
329 ASSERT (BitMap != NULL);
330
331 if (NumberOfPages > BitsToUnitEnd) {
332 // Cross map unit
333 Bits = BitsToUnitEnd;
334 } else {
335 Bits = NumberOfPages;
336 }
337
338 SetBits (Address, Bits, BitMap);
339
340 NumberOfPages -= Bits;
341 Address += EFI_PAGES_TO_SIZE (Bits);
342 }
343 }
344
345 /**
346 Clear corresponding bits in bitmap table according to given memory range.
347
348 @param[in] Address Memory address to unset from.
349 @param[in] NumberOfPages Number of pages to unset guard.
350
351 @return VOID
352 **/
353 VOID
354 EFIAPI
355 ClearGuardedMemoryBits (
356 IN EFI_PHYSICAL_ADDRESS Address,
357 IN UINTN NumberOfPages
358 )
359 {
360 UINT64 *BitMap;
361 UINTN Bits;
362 UINTN BitsToUnitEnd;
363
364 while (NumberOfPages > 0) {
365 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
366 ASSERT (BitMap != NULL);
367
368 if (NumberOfPages > BitsToUnitEnd) {
369 // Cross map unit
370 Bits = BitsToUnitEnd;
371 } else {
372 Bits = NumberOfPages;
373 }
374
375 ClearBits (Address, Bits, BitMap);
376
377 NumberOfPages -= Bits;
378 Address += EFI_PAGES_TO_SIZE (Bits);
379 }
380 }
381
382 /**
383 Retrieve corresponding bits in bitmap table according to given memory range.
384
385 @param[in] Address Memory address to retrieve from.
386 @param[in] NumberOfPages Number of pages to retrieve.
387
388 @return An integer containing the guarded memory bitmap.
389 **/
390 UINTN
391 GetGuardedMemoryBits (
392 IN EFI_PHYSICAL_ADDRESS Address,
393 IN UINTN NumberOfPages
394 )
395 {
396 UINT64 *BitMap;
397 UINTN Bits;
398 UINTN Result;
399 UINTN Shift;
400 UINTN BitsToUnitEnd;
401
402 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
403
404 Result = 0;
405 Shift = 0;
406 while (NumberOfPages > 0) {
407 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
408
409 if (NumberOfPages > BitsToUnitEnd) {
410 // Cross map unit
411 Bits = BitsToUnitEnd;
412 } else {
413 Bits = NumberOfPages;
414 }
415
416 if (BitMap != NULL) {
417 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
418 }
419
420 Shift += Bits;
421 NumberOfPages -= Bits;
422 Address += EFI_PAGES_TO_SIZE (Bits);
423 }
424
425 return Result;
426 }
427
428 /**
429 Get bit value in bitmap table for the given address.
430
431 @param[in] Address The address to retrieve for.
432
433 @return 1 or 0.
434 **/
435 UINTN
436 EFIAPI
437 GetGuardMapBit (
438 IN EFI_PHYSICAL_ADDRESS Address
439 )
440 {
441 UINT64 *GuardMap;
442
443 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
444 if (GuardMap != NULL) {
445 if (RShiftU64 (*GuardMap,
446 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
447 return 1;
448 }
449 }
450
451 return 0;
452 }
453
454 /**
455 Set the bit in bitmap table for the given address.
456
457 @param[in] Address The address to set for.
458
459 @return VOID.
460 **/
461 VOID
462 EFIAPI
463 SetGuardMapBit (
464 IN EFI_PHYSICAL_ADDRESS Address
465 )
466 {
467 UINT64 *GuardMap;
468 UINT64 BitMask;
469
470 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
471 if (GuardMap != NULL) {
472 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
473 *GuardMap |= BitMask;
474 }
475 }
476
477 /**
478 Clear the bit in bitmap table for the given address.
479
480 @param[in] Address The address to clear for.
481
482 @return VOID.
483 **/
484 VOID
485 EFIAPI
486 ClearGuardMapBit (
487 IN EFI_PHYSICAL_ADDRESS Address
488 )
489 {
490 UINT64 *GuardMap;
491 UINT64 BitMask;
492
493 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
494 if (GuardMap != NULL) {
495 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
496 *GuardMap &= ~BitMask;
497 }
498 }
499
500 /**
501 Check to see if the page at the given address is a Guard page or not.
502
503 @param[in] Address The address to check for.
504
505 @return TRUE The page at Address is a Guard page.
506 @return FALSE The page at Address is not a Guard page.
507 **/
508 BOOLEAN
509 EFIAPI
510 IsGuardPage (
511 IN EFI_PHYSICAL_ADDRESS Address
512 )
513 {
514 UINTN BitMap;
515
516 //
517 // There must be at least one guarded page before and/or after given
518 // address if it's a Guard page. The bitmap pattern should be one of
519 // 001, 100 and 101
520 //
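  //
  // In the 3-bit value retrieved below, bit 0 is the page before Address,
  // bit 1 is Address itself, and bit 2 is the page after it; Address is a
  // Guard page when it is not guarded itself but at least one neighbor is.
  //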
521 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
522 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
523 }
524
525 /**
526 Check to see if the page at the given address is a head Guard page or not.
527
528 @param[in] Address The address to check for.
529
530 @return TRUE The page at Address is a head Guard page.
531 @return FALSE The page at Address is not a head Guard page.
532 **/
533 BOOLEAN
534 EFIAPI
535 IsHeadGuard (
536 IN EFI_PHYSICAL_ADDRESS Address
537 )
538 {
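  //
  // A head Guard page is not guarded itself, while the page right after it
  // is; GetGuardedMemoryBits() therefore returns exactly BIT1 for it.
  //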
539 return (GetGuardedMemoryBits (Address, 2) == BIT1);
540 }
541
542 /**
543 Check to see if the page at the given address is a tail Guard page or not.
544
545 @param[in] Address The address to check for.
546
547 @return TRUE The page at Address is a tail Guard page.
548 @return FALSE The page at Address is not a tail Guard page.
549 **/
550 BOOLEAN
551 EFIAPI
552 IsTailGuard (
553 IN EFI_PHYSICAL_ADDRESS Address
554 )
555 {
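  //
  // A tail Guard page is not guarded itself, while the page right before it
  // is; GetGuardedMemoryBits() therefore returns exactly BIT0 for it.
  //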
556 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
557 }
558
559 /**
560 Check to see if the page at the given address is guarded or not.
561
562 @param[in] Address The address to check for.
563
564 @return TRUE The page at Address is guarded.
565 @return FALSE The page at Address is not guarded.
566 **/
567 BOOLEAN
568 EFIAPI
569 IsMemoryGuarded (
570 IN EFI_PHYSICAL_ADDRESS Address
571 )
572 {
573 return (GetGuardMapBit (Address) == 1);
574 }
575
576 /**
577 Set the page at the given address to be a Guard page.
578
579 This is done by changing the page table attribute to be NOT PRESENT.
580
581 @param[in] BaseAddress Page address to Guard at.
582
583 @return VOID.
584 **/
585 VOID
586 EFIAPI
587 SetGuardPage (
588 IN EFI_PHYSICAL_ADDRESS BaseAddress
589 )
590 {
591 if (mSmmMemoryAttribute != NULL) {
592 mOnGuarding = TRUE;
593 mSmmMemoryAttribute->SetMemoryAttributes (
594 mSmmMemoryAttribute,
595 BaseAddress,
596 EFI_PAGE_SIZE,
597 EFI_MEMORY_RP
598 );
599 mOnGuarding = FALSE;
600 }
601 }
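//
// Note: mOnGuarding is raised around the attribute update because changing
// page attributes may itself need to allocate page-table pages;
// IsMemoryTypeToGuard() checks this flag so such internal allocations are
// never guarded in turn.
//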
602
603 /**
604 Unset the Guard page at the given address to the normal memory.
605
606 This is done by changing the page table attribute to be PRESENT.
607
608 @param[in] BaseAddress Page address to unset the Guard at.
609
610 @return VOID.
611 **/
612 VOID
613 EFIAPI
614 UnsetGuardPage (
615 IN EFI_PHYSICAL_ADDRESS BaseAddress
616 )
617 {
618 if (mSmmMemoryAttribute != NULL) {
619 mOnGuarding = TRUE;
620 mSmmMemoryAttribute->ClearMemoryAttributes (
621 mSmmMemoryAttribute,
622 BaseAddress,
623 EFI_PAGE_SIZE,
624 EFI_MEMORY_RP
625 );
626 mOnGuarding = FALSE;
627 }
628 }
629
630 /**
631 Check to see if the memory at the given address should be guarded or not.
632
633 @param[in] MemoryType Memory type to check.
634 @param[in] AllocateType Allocation type to check.
635 @param[in] PageOrPool Indicate a page allocation or pool allocation.
636
637
638 @return TRUE The given type of memory should be guarded.
639 @return FALSE The given type of memory should not be guarded.
640 **/
641 BOOLEAN
642 IsMemoryTypeToGuard (
643 IN EFI_MEMORY_TYPE MemoryType,
644 IN EFI_ALLOCATE_TYPE AllocateType,
645 IN UINT8 PageOrPool
646 )
647 {
648 UINT64 TestBit;
649 UINT64 ConfigBit;
650
651 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0
652 || mOnGuarding
653 || AllocateType == AllocateAddress) {
654 return FALSE;
655 }
656
657 ConfigBit = 0;
658 if ((PageOrPool & GUARD_HEAP_TYPE_POOL) != 0) {
659 ConfigBit |= PcdGet64 (PcdHeapGuardPoolType);
660 }
661
662 if ((PageOrPool & GUARD_HEAP_TYPE_PAGE) != 0) {
663 ConfigBit |= PcdGet64 (PcdHeapGuardPageType);
664 }
665
666 if (MemoryType == EfiRuntimeServicesData ||
667 MemoryType == EfiRuntimeServicesCode) {
668 TestBit = LShiftU64 (1, MemoryType);
669 } else if (MemoryType == EfiMaxMemoryType) {
670 TestBit = (UINT64)-1;
671 } else {
672 TestBit = 0;
673 }
674
675 return ((ConfigBit & TestBit) != 0);
676 }
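//
// Example (illustrative): setting BIT6 (EfiRuntimeServicesData) in
// PcdHeapGuardPageType and GUARD_HEAP_TYPE_PAGE in PcdHeapGuardPropertyMask
// enables page guarding for runtime-data allocations in SMM. EfiMaxMemoryType
// is used by IsHeapGuardEnabled() as a wildcard to test whether any guarding
// is configured at all.
//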
677
678 /**
679 Check to see if the pool at the given address should be guarded or not.
680
681 @param[in] MemoryType Pool type to check.
682
683
684 @return TRUE The given type of pool should be guarded.
685 @return FALSE The given type of pool should not be guarded.
686 **/
687 BOOLEAN
688 IsPoolTypeToGuard (
689 IN EFI_MEMORY_TYPE MemoryType
690 )
691 {
692 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
693 GUARD_HEAP_TYPE_POOL);
694 }
695
696 /**
697 Check to see if the page at the given address should be guarded or not.
698
699 @param[in] MemoryType Page type to check.
700 @param[in] AllocateType Allocation type to check.
701
702 @return TRUE The given type of page should be guarded.
703 @return FALSE The given type of page should not be guarded.
704 **/
705 BOOLEAN
706 IsPageTypeToGuard (
707 IN EFI_MEMORY_TYPE MemoryType,
708 IN EFI_ALLOCATE_TYPE AllocateType
709 )
710 {
711 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
712 }
713
714 /**
715 Check to see if the heap guard is enabled for page and/or pool allocation.
716
717 @return TRUE/FALSE.
718 **/
719 BOOLEAN
720 IsHeapGuardEnabled (
721 VOID
722 )
723 {
724 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
725 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
726 }
727
728 /**
729 Set head Guard and tail Guard for the given memory range.
730
731 @param[in] Memory Base address of memory to set guard for.
732 @param[in] NumberOfPages Memory size in pages.
733
734 @return VOID.
735 **/
736 VOID
737 SetGuardForMemory (
738 IN EFI_PHYSICAL_ADDRESS Memory,
739 IN UINTN NumberOfPages
740 )
741 {
742 EFI_PHYSICAL_ADDRESS GuardPage;
743
744 //
745 // Set tail Guard
746 //
747 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
748 if (!IsGuardPage (GuardPage)) {
749 SetGuardPage (GuardPage);
750 }
751
752 // Set head Guard
753 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
754 if (!IsGuardPage (GuardPage)) {
755 SetGuardPage (GuardPage);
756 }
757
758 //
759 // Mark the memory range as Guarded
760 //
761 SetGuardedMemoryBits (Memory, NumberOfPages);
762 }
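//
// Note: Guard pages already provided by adjacent allocations are detected via
// IsGuardPage() and reused, so a shared Guard page has its attributes changed
// only once.
//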
763
764 /**
765 Unset head Guard and tail Guard for the given memory range.
766
767 @param[in] Memory Base address of memory to unset guard for.
768 @param[in] NumberOfPages Memory size in pages.
769
770 @return VOID.
771 **/
772 VOID
773 UnsetGuardForMemory (
774 IN EFI_PHYSICAL_ADDRESS Memory,
775 IN UINTN NumberOfPages
776 )
777 {
778 EFI_PHYSICAL_ADDRESS GuardPage;
779 UINT64 GuardBitmap;
780
781 if (NumberOfPages == 0) {
782 return;
783 }
784
785 //
786 // Head Guard must be one page before, if any.
787 //
788 // MSB-> 1 0 <-LSB
789 // -------------------
790 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
791 // Head Guard -> 0 0 -> Free Head Guard too (not a shared Guard)
792 // 1 X -> Don't free first page (need a new Guard)
793 // (it'll be turned into a Guard page later)
794 // -------------------
795 // Start -> -1 -2
796 //
797 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
798 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
799 if ((GuardBitmap & BIT1) == 0) {
800 //
801 // Head Guard exists.
802 //
803 if ((GuardBitmap & BIT0) == 0) {
804 //
805 // If the head Guard is not a tail Guard of adjacent memory block,
806 // unset it.
807 //
808 UnsetGuardPage (GuardPage);
809 }
810 } else {
811 //
812 // Pages before memory to free are still in Guard. It's a partial free
813 // case. Turn first page of memory block to free into a new Guard.
814 //
815 SetGuardPage (Memory);
816 }
817
818 //
819 // Tail Guard must be the page after this memory block to free, if any.
820 //
821 // MSB-> 1 0 <-LSB
822 // --------------------
823 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
824 // 0 0 <- Tail Guard -> Free Tail Guard too (not a shared Guard)
825 // X 1 -> Don't free last page (need a new Guard)
826 // (it'll be turned into a Guard page later)
827 // --------------------
828 // +1 +0 <- End
829 //
830 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
831 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
832 if ((GuardBitmap & BIT0) == 0) {
833 //
834 // Tail Guard exists.
835 //
836 if ((GuardBitmap & BIT1) == 0) {
837 //
838 // If the tail Guard is not a head Guard of adjacent memory block,
839 // free it; otherwise, keep it.
840 //
841 UnsetGuardPage (GuardPage);
842 }
843 } else {
844 //
845 // Pages after memory to free are still in Guard. It's a partial free
846 // case. We need to keep one page to be a head Guard.
847 //
848 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
849 }
850
851 //
852 // No matter what, we just clear the mark of the Guarded memory.
853 //
854 ClearGuardedMemoryBits(Memory, NumberOfPages);
855 }
856
857 /**
858 Adjust address of free memory according to existing and/or required Guard.
859
860 This function checks whether there are existing Guard pages of adjacent
861 memory blocks, and tries to use them as the Guard pages of the memory to be
862 allocated.
863
864 @param[in] Start Start address of free memory block.
865 @param[in] Size Size of free memory block.
866 @param[in] SizeRequested Size of memory to allocate.
867
868 @return The end address of memory block found.
869 @return 0 if there is not enough space for the required size of memory and its Guard.
870 **/
871 UINT64
872 AdjustMemoryS (
873 IN UINT64 Start,
874 IN UINT64 Size,
875 IN UINT64 SizeRequested
876 )
877 {
878 UINT64 Target;
879
880 Target = Start + Size - SizeRequested;
881
882 //
883 // At least one more page needed for Guard page.
884 //
885 if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {
886 return 0;
887 }
888
889 if (!IsGuardPage (Start + Size)) {
890 // No Guard at tail to share. One more page is needed.
891 Target -= EFI_PAGES_TO_SIZE (1);
892 }
893
894 // Out of range?
895 if (Target < Start) {
896 return 0;
897 }
898
899 // At the edge?
900 if (Target == Start) {
901 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
902 // Not enough space for a new head Guard if there is no Guard at head to share.
903 return 0;
904 }
905 }
906
907 // OK, we have enough pages for the memory and its Guards. Return the end
908 // address of the memory block found.
909 return Target + SizeRequested - 1;
910 }
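//
// Worked example for AdjustMemoryS() (illustrative values): Start == 0x100000,
// Size == 5 pages, SizeRequested == 3 pages, and no existing Guard page at
// Start + Size. Target becomes 0x100000 + 0x5000 - 0x3000 - 0x1000 = 0x101000,
// the page at 0x104000 is reserved for the new tail Guard, the page at
// 0x100000 can become the head Guard, and the function returns
// 0x101000 + 0x3000 - 1 = 0x103FFF.
//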
911
912 /**
913 Adjust the start address and number of pages to free according to Guard.
914
915 The purpose of this function is to keep the shared Guard page with the adjacent
916 memory block if it is still guarded, or to free it if it is no longer shared.
917 Another purpose is to reserve pages as Guard pages in a partial-page-free situation.
918
919 @param[in,out] Memory Base address of memory to free.
920 @param[in,out] NumberOfPages Size of memory to free.
921
922 @return VOID.
923 **/
924 VOID
925 AdjustMemoryF (
926 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
927 IN OUT UINTN *NumberOfPages
928 )
929 {
930 EFI_PHYSICAL_ADDRESS Start;
931 EFI_PHYSICAL_ADDRESS MemoryToTest;
932 UINTN PagesToFree;
933 UINT64 GuardBitmap;
934
935 if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
936 return;
937 }
938
939 Start = *Memory;
940 PagesToFree = *NumberOfPages;
941
942 //
943 // Head Guard must be one page before, if any.
944 //
945 // MSB-> 1 0 <-LSB
946 // -------------------
947 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
948 // Head Guard -> 0 0 -> Free Head Guard too (not a shared Guard)
949 // 1 X -> Don't free first page (need a new Guard)
950 // (it'll be turned into a Guard page later)
951 // -------------------
952 // Start -> -1 -2
953 //
954 MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
955 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
956 if ((GuardBitmap & BIT1) == 0) {
957 //
958 // Head Guard exists.
959 //
960 if ((GuardBitmap & BIT0) == 0) {
961 //
962 // If the head Guard is not a tail Guard of adjacent memory block,
963 // free it; otherwise, keep it.
964 //
965 Start -= EFI_PAGES_TO_SIZE (1);
966 PagesToFree += 1;
967 }
968 } else {
969 //
970 // No Head Guard, and pages before memory to free are still in Guard. It's a
971 // partial free case. We need to keep one page to be a tail Guard.
972 //
973 Start += EFI_PAGES_TO_SIZE (1);
974 PagesToFree -= 1;
975 }
976
977 //
978 // Tail Guard must be the page after this memory block to free, if any.
979 //
980 // MSB-> 1 0 <-LSB
981 // --------------------
982 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
983 // 0 0 <- Tail Guard -> Free Tail Guard too (not a shared Guard)
984 // X 1 -> Don't free last page (need a new Guard)
985 // (it'll be turned into a Guard page later)
986 // --------------------
987 // +1 +0 <- End
988 //
989 MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
990 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
991 if ((GuardBitmap & BIT0) == 0) {
992 //
993 // Tail Guard exists.
994 //
995 if ((GuardBitmap & BIT1) == 0) {
996 //
997 // If the tail Guard is not a head Guard of adjacent memory block,
998 // free it; otherwise, keep it.
999 //
1000 PagesToFree += 1;
1001 }
1002 } else if (PagesToFree > 0) {
1003 //
1004 // No Tail Guard, and pages after memory to free are still in Guard. It's a
1005 // partial free case. We need to keep one page to be a head Guard.
1006 //
1007 PagesToFree -= 1;
1008 }
1009
1010 *Memory = Start;
1011 *NumberOfPages = PagesToFree;
1012 }
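//
// Example (illustrative): freeing the middle pages of a larger guarded
// allocation is a partial free on both sides; one page is then kept out of
// the range returned to the free list at each end, so that it can be turned
// into the new tail/head Guard of the remaining halves by
// UnsetGuardForMemory().
//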
1013
1014 /**
1015 Adjust the base address and number of pages to actually allocate according to the Guard.
1016
1017 @param[in,out] Memory Base address of free memory.
1018 @param[in,out] NumberOfPages Size of memory to allocate.
1019
1020 @return VOID.
1021 **/
1022 VOID
1023 AdjustMemoryA (
1024 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
1025 IN OUT UINTN *NumberOfPages
1026 )
1027 {
1028 //
1029 // FindFreePages() has already taken the Guard into account. It's safe to
1030 // adjust the start address and/or number of pages here, to make sure that
1031 // the Guards are also "allocated".
1032 //
1033 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
1034 // No tail Guard, add one.
1035 *NumberOfPages += 1;
1036 }
1037
1038 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
1039 // No head Guard, add one.
1040 *Memory -= EFI_PAGE_SIZE;
1041 *NumberOfPages += 1;
1042 }
1043 }
1044
1045 /**
1046 Adjust the pool head position to make sure the Guard page is adjacent to
1047 the pool tail or pool head.
1048
1049 @param[in] Memory Base address of memory allocated.
1050 @param[in] NoPages Number of pages actually allocated.
1051 @param[in] Size Size of memory requested.
1052 (plus pool head/tail overhead)
1053
1054 @return Address of pool head
1055 **/
1056 VOID *
1057 AdjustPoolHeadA (
1058 IN EFI_PHYSICAL_ADDRESS Memory,
1059 IN UINTN NoPages,
1060 IN UINTN Size
1061 )
1062 {
1063 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1064 //
1065 // Pool head is put near the head Guard
1066 //
1067 return (VOID *)(UINTN)Memory;
1068 }
1069
1070 //
1071 // Pool head is put near the tail Guard
1072 //
1073 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1074 }
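//
// Example (illustrative values): with BIT7 of PcdHeapGuardPropertyMask clear,
// a pool of Size 0x120 carved out of one page at 0x200000 gets its head at
// 0x200000 + 0x1000 - 0x120 = 0x200EE0, so the byte right after the pool data
// touches the tail Guard page and overflows fault immediately. With BIT7 set,
// the head stays at 0x200000 and underflows are caught instead.
//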
1075
1076 /**
1077 Get the page base address according to pool head address.
1078
1079 @param[in] Memory Head address of pool to free.
1080
1081 @return Address of pool head.
1082 **/
1083 VOID *
1084 AdjustPoolHeadF (
1085 IN EFI_PHYSICAL_ADDRESS Memory
1086 )
1087 {
1088 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1089 //
1090 // Pool head is put near the head Guard
1091 //
1092 return (VOID *)(UINTN)Memory;
1093 }
1094
1095 //
1096 // Pool head is put near the tail Guard
1097 //
1098 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1099 }
1100
1101 /**
1102 Helper function of memory allocation with Guard pages.
1103
1104 @param[in,out] FreePageList The free page node.
1105 @param[in] NumberOfPages Number of pages to be allocated.
1106 @param[in] MaxAddress Request to allocate memory below this address.
1107 @param[in] MemoryType Type of memory requested.
1108
1109 @return Memory address of allocated pages.
1110 **/
1111 UINTN
1112 InternalAllocMaxAddressWithGuard (
1113 IN OUT LIST_ENTRY *FreePageList,
1114 IN UINTN NumberOfPages,
1115 IN UINTN MaxAddress,
1116 IN EFI_MEMORY_TYPE MemoryType
1117
1118 )
1119 {
1120 LIST_ENTRY *Node;
1121 FREE_PAGE_LIST *Pages;
1122 UINTN PagesToAlloc;
1123 UINTN HeadGuard;
1124 UINTN TailGuard;
1125 UINTN Address;
1126
1127 for (Node = FreePageList->BackLink; Node != FreePageList;
1128 Node = Node->BackLink) {
1129 Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
1130 if (Pages->NumberOfPages >= NumberOfPages &&
1131 (UINTN)Pages + EFI_PAGES_TO_SIZE (NumberOfPages) - 1 <= MaxAddress) {
1132
1133 //
1134 // We may need 1 or 2 more pages for Guard. Check it out.
1135 //
1136 PagesToAlloc = NumberOfPages;
1137 TailGuard = (UINTN)Pages + EFI_PAGES_TO_SIZE (Pages->NumberOfPages);
1138 if (!IsGuardPage (TailGuard)) {
1139 //
1140 // Add one if no Guard at the end of current free memory block.
1141 //
1142 PagesToAlloc += 1;
1143 TailGuard = 0;
1144 }
1145
1146 HeadGuard = (UINTN)Pages +
1147 EFI_PAGES_TO_SIZE (Pages->NumberOfPages - PagesToAlloc) -
1148 EFI_PAGE_SIZE;
1149 if (!IsGuardPage (HeadGuard)) {
1150 //
1151 // Add one if no Guard at the page before the address to allocate
1152 //
1153 PagesToAlloc += 1;
1154 HeadGuard = 0;
1155 }
1156
1157 if (Pages->NumberOfPages < PagesToAlloc) {
1158 // Not enough space to allocate memory with Guards? Try next block.
1159 continue;
1160 }
1161
1162 Address = InternalAllocPagesOnOneNode (Pages, PagesToAlloc, MaxAddress);
1163 ConvertSmmMemoryMapEntry(MemoryType, Address, PagesToAlloc, FALSE);
1164 CoreFreeMemoryMapStack();
1165 if (HeadGuard == 0) {
1166 // Don't pass the Guard page to user.
1167 Address += EFI_PAGE_SIZE;
1168 }
1169 SetGuardForMemory (Address, NumberOfPages);
1170 return Address;
1171 }
1172 }
1173
1174 return (UINTN)(-1);
1175 }
1176
1177 /**
1178 Helper function of memory free with Guard pages.
1179
1180 @param[in] Memory Base address of memory being freed.
1181 @param[in] NumberOfPages The number of pages to free.
1182 @param[in] AddRegion If this memory is a newly added region.
1183
1184 @retval EFI_NOT_FOUND Could not find the entry that covers the range.
1185 @retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or NumberOfPages is zero.
1186 @retval EFI_SUCCESS Pages successfully freed.
1187 **/
1188 EFI_STATUS
1189 SmmInternalFreePagesExWithGuard (
1190 IN EFI_PHYSICAL_ADDRESS Memory,
1191 IN UINTN NumberOfPages,
1192 IN BOOLEAN AddRegion
1193 )
1194 {
1195 EFI_PHYSICAL_ADDRESS MemoryToFree;
1196 UINTN PagesToFree;
1197
1198 MemoryToFree = Memory;
1199 PagesToFree = NumberOfPages;
1200
1201 AdjustMemoryF (&MemoryToFree, &PagesToFree);
1202 UnsetGuardForMemory (Memory, NumberOfPages);
1203 if (PagesToFree == 0) {
1204 return EFI_SUCCESS;
1205 }
1206
1207 return SmmInternalFreePagesEx (MemoryToFree, PagesToFree, AddRegion);
1208 }
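//
// Note: UnsetGuardForMemory() is given the caller's original range, while the
// range actually returned to the free list is the one adjusted by
// AdjustMemoryF(); PagesToFree can legitimately become 0 when every page of
// the adjusted range is retained as a Guard page, in which case nothing is
// freed.
//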
1209
1210 /**
1211 Set all Guard pages which could not be set while executing outside of SMM.
1212 **/
1213 VOID
1214 SetAllGuardPages (
1215 VOID
1216 )
1217 {
1218 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1219 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1220 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1221 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1222 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1223 UINT64 TableEntry;
1224 UINT64 Address;
1225 UINT64 GuardPage;
1226 INTN Level;
1227 UINTN Index;
1228 BOOLEAN OnGuarding;
1229
1230 if (mGuardedMemoryMap == 0 ||
1231 mMapLevel == 0 ||
1232 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1233 return;
1234 }
1235
1236 CopyMem (Entries, mLevelMask, sizeof (Entries));
1237 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1238
1239 SetMem (Tables, sizeof(Tables), 0);
1240 SetMem (Addresses, sizeof(Addresses), 0);
1241 SetMem (Indices, sizeof(Indices), 0);
1242
1243 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1244 Tables[Level] = mGuardedMemoryMap;
1245 Address = 0;
1246 OnGuarding = FALSE;
1247
1248 DEBUG_CODE (
1249 DumpGuardedMemoryBitmap ();
1250 );
1251
1252 while (TRUE) {
1253 if (Indices[Level] > Entries[Level]) {
1254 Tables[Level] = 0;
1255 Level -= 1;
1256 } else {
1257
1258 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
1259 Address = Addresses[Level];
1260
1261 if (TableEntry == 0) {
1262
1263 OnGuarding = FALSE;
1264
1265 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1266
1267 Level += 1;
1268 Tables[Level] = TableEntry;
1269 Addresses[Level] = Address;
1270 Indices[Level] = 0;
1271
1272 continue;
1273
1274 } else {
1275
1276 Index = 0;
1277 while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
1278 if ((TableEntry & 1) == 1) {
1279 if (OnGuarding) {
1280 GuardPage = 0;
1281 } else {
1282 GuardPage = Address - EFI_PAGE_SIZE;
1283 }
1284 OnGuarding = TRUE;
1285 } else {
1286 if (OnGuarding) {
1287 GuardPage = Address;
1288 } else {
1289 GuardPage = 0;
1290 }
1291 OnGuarding = FALSE;
1292 }
1293
1294 if (GuardPage != 0) {
1295 SetGuardPage (GuardPage);
1296 }
1297
1298 if (TableEntry == 0) {
1299 break;
1300 }
1301
1302 TableEntry = RShiftU64 (TableEntry, 1);
1303 Address += EFI_PAGE_SIZE;
1304 Index += 1;
1305 }
1306 }
1307 }
1308
1309 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1310 break;
1311 }
1312
1313 Indices[Level] += 1;
1314 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1315 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1316
1317 }
1318 }
1319
1320 /**
1321 Hook function used to set all Guard pages after entering SMM mode.
1322 **/
1323 VOID
1324 SmmEntryPointMemoryManagementHook (
1325 VOID
1326 )
1327 {
1328 EFI_STATUS Status;
1329
1330 if (mSmmMemoryAttribute == NULL) {
1331 Status = SmmLocateProtocol (
1332 &gEdkiiSmmMemoryAttributeProtocolGuid,
1333 NULL,
1334 (VOID **)&mSmmMemoryAttribute
1335 );
1336 if (!EFI_ERROR(Status)) {
1337 SetAllGuardPages ();
1338 }
1339 }
1340 }
1341
1342 /**
1343 Helper function to convert a UINT64 value to a binary string.
1344
1345 @param[in] Value Value of a UINT64 integer.
1346 @param[out] BinString String buffer to contain the conversion result.
1347
1348 @return VOID.
1349 **/
1350 VOID
1351 Uint64ToBinString (
1352 IN UINT64 Value,
1353 OUT CHAR8 *BinString
1354 )
1355 {
1356 UINTN Index;
1357
1358 if (BinString == NULL) {
1359 return;
1360 }
1361
1362 for (Index = 64; Index > 0; --Index) {
1363 BinString[Index - 1] = '0' + (Value & 1);
1364 Value = RShiftU64 (Value, 1);
1365 }
1366 BinString[64] = '\0';
1367 }
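//
// Note: BinString must provide room for 65 CHAR8 characters (64 binary digits
// plus the terminating NUL); DumpGuardedMemoryBitmap() sizes its buffer as
// GUARDED_HEAP_MAP_ENTRY_BITS + 1 for this reason.
//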
1368
1369 /**
1370 Dump the guarded memory bit map.
1371 **/
1372 VOID
1373 EFIAPI
1374 DumpGuardedMemoryBitmap (
1375 VOID
1376 )
1377 {
1378 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1379 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1380 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1381 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1382 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1383 UINT64 TableEntry;
1384 UINT64 Address;
1385 INTN Level;
1386 UINTN RepeatZero;
1387 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
1388 CHAR8 *Ruler1;
1389 CHAR8 *Ruler2;
1390
1391 if (mGuardedMemoryMap == 0 ||
1392 mMapLevel == 0 ||
1393 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1394 return;
1395 }
1396
1397 Ruler1 = " 3 2 1 0";
1398 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
1399
1400 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
1401 " Guarded Memory Bitmap "
1402 "==============================\r\n"));
1403 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
1404 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
1405
1406 CopyMem (Entries, mLevelMask, sizeof (Entries));
1407 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1408
1409 SetMem (Indices, sizeof(Indices), 0);
1410 SetMem (Tables, sizeof(Tables), 0);
1411 SetMem (Addresses, sizeof(Addresses), 0);
1412
1413 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1414 Tables[Level] = mGuardedMemoryMap;
1415 Address = 0;
1416 RepeatZero = 0;
1417
1418 while (TRUE) {
1419 if (Indices[Level] > Entries[Level]) {
1420
1421 Tables[Level] = 0;
1422 Level -= 1;
1423 RepeatZero = 0;
1424
1425 DEBUG ((
1426 HEAP_GUARD_DEBUG_LEVEL,
1427 "========================================="
1428 "=========================================\r\n"
1429 ));
1430
1431 } else {
1432
1433 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
1434 Address = Addresses[Level];
1435
1436 if (TableEntry == 0) {
1437
1438 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1439 if (RepeatZero == 0) {
1440 Uint64ToBinString(TableEntry, String);
1441 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1442 } else if (RepeatZero == 1) {
1443 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
1444 }
1445 RepeatZero += 1;
1446 }
1447
1448 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1449
1450 Level += 1;
1451 Tables[Level] = TableEntry;
1452 Addresses[Level] = Address;
1453 Indices[Level] = 0;
1454 RepeatZero = 0;
1455
1456 continue;
1457
1458 } else {
1459
1460 RepeatZero = 0;
1461 Uint64ToBinString(TableEntry, String);
1462 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1463
1464 }
1465 }
1466
1467 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1468 break;
1469 }
1470
1471 Indices[Level] += 1;
1472 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1473 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1474
1475 }
1476 }
1477
1478 /**
1479 Debug function used to verify if the Guard page is well set or not.
1480
1481 @param[in] BaseAddress Address of memory to check.
1482 @param[in] NumberOfPages Size of memory in pages.
1483
1484 @return TRUE The head Guard and tail Guard are both well set.
1485 @return FALSE The head Guard and/or tail Guard are not well set.
1486 **/
1487 BOOLEAN
1488 VerifyMemoryGuard (
1489 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1490 IN UINTN NumberOfPages
1491 )
1492 {
1493 EFI_STATUS Status;
1494 UINT64 Attribute;
1495 EFI_PHYSICAL_ADDRESS Address;
1496
1497 if (mSmmMemoryAttribute == NULL) {
1498 return TRUE;
1499 }
1500
1501 Attribute = 0;
1502 Address = BaseAddress - EFI_PAGE_SIZE;
1503 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1504 mSmmMemoryAttribute,
1505 Address,
1506 EFI_PAGE_SIZE,
1507 &Attribute
1508 );
1509 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
1510 DEBUG ((DEBUG_ERROR, "Head Guard is not set at: %016lx (%016lX)!!!\r\n",
1511 Address, Attribute));
1512 DumpGuardedMemoryBitmap ();
1513 return FALSE;
1514 }
1515
1516 Attribute = 0;
1517 Address = BaseAddress + EFI_PAGES_TO_SIZE (NumberOfPages);
1518 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1519 mSmmMemoryAttribute,
1520 Address,
1521 EFI_PAGE_SIZE,
1522 &Attribute
1523 );
1524 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
1525 DEBUG ((DEBUG_ERROR, "Tail Guard is not set at: %016lx (%016lX)!!!\r\n",
1526 Address, Attribute));
1527 DumpGuardedMemoryBitmap ();
1528 return FALSE;
1529 }
1530
1531 return TRUE;
1532 }
1533