]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Core/PiSmmCore/HeapGuard.c
MdeModulePkg/PiSmmCore: fix bits operation error on a boundary condition
[mirror_edk2.git] / MdeModulePkg / Core / PiSmmCore / HeapGuard.c
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "HeapGuard.h"
16
//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocating pages for new PDE/PTE.
// Checked by IsMemoryTypeToGuard() so internal allocations skip guarding.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to (root of) table tracking the Guarded memory with bitmap, in
// which '1' is used to indicate memory guarded. '0' might be free memory
// or Guard page itself, depending on status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized at least by 1. It will be automatically
// updated (grown) by FindGuardedMemoryMap() according to the address of
// memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;

//
// SMM memory attribute protocol; used by SetGuardPage()/UnsetGuardPage()
// to toggle the EFI_MEMORY_RP attribute on Guard pages.
//
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *mSmmMemoryAttribute = NULL;
49
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  The run of bits may span several consecutive 64-bit map entries: Msbs
  bits in the first entry, Qwords whole entries, and Lsbs bits in the last.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // The run reaches (or crosses) the end of the first map entry. The
    // modulo is the boundary-condition fix: when StartBit is 0 and the run
    // is a whole multiple of GUARDED_HEAP_MAP_ENTRY_BITS, Msbs must be 0
    // so the entire run is handled by the SetMem64() below.
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Run fits entirely inside the first map entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    //
    // Set the top Msbs bits of the first entry, starting at StartBit.
    //
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    //
    // Fill whole 64-bit entries with all-ones in one shot.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    //
    // Set the trailing Lsbs bits in the last entry.
    //
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
102
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  Mirror of SetBits(): the run is split into Msbs bits in the first 64-bit
  map entry, Qwords whole entries, and Lsbs bits in the last.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // The run reaches (or crosses) the end of the first map entry. The
    // modulo handles the boundary case StartBit == 0 with a run that is a
    // whole multiple of GUARDED_HEAP_MAP_ENTRY_BITS: Msbs must then be 0
    // so the entire run is cleared by the SetMem64() below.
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Run fits entirely inside the first map entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    //
    // Clear the top Msbs bits of the first entry, starting at StartBit.
    //
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    //
    // Zero whole 64-bit entries in one shot.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    //
    // Clear the trailing Lsbs bits in the last entry.
    //
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
154
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address    Start address to retrieve bits for.
  @param[in]  BitNumber  Number of bits to get.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return  An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // The run crosses into the next 64-bit map entry: Msbs bits come from
    // the current entry, Lsbs bits from the following one.
    //
    Msbs  = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs  = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs  = BitNumber;
    Lsbs  = 0;
  }

  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Whole-entry read; special-cased to avoid a full-width (64-bit) shift.
    //
    Result = *BitMap;
  } else {
    //
    // Extract Msbs bits from the first entry, then splice in the Lsbs bits
    // from the next entry above them (if the run crossed the boundary).
    //
    Result    = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
    if (Lsbs > 0) {
      BitMap  += 1;
      Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
206
207 /**
208 Helper function to allocate pages without Guard for internal uses.
209
210 @param[in] Pages Page number.
211
212 @return Address of memory allocated.
213 **/
214 VOID *
215 PageAlloc (
216 IN UINTN Pages
217 )
218 {
219 EFI_STATUS Status;
220 EFI_PHYSICAL_ADDRESS Memory;
221
222 Status = SmmInternalAllocatePages (AllocateAnyPages, EfiRuntimeServicesData,
223 Pages, &Memory, FALSE);
224 if (EFI_ERROR (Status)) {
225 Memory = 0;
226 }
227
228 return (VOID *)(UINTN)Memory;
229 }
230
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  May grow the table depth (mMapLevel) and allocate missing map units on
  the way down when AllocMapUnit is TRUE.

  @param[in]   Address       Start address to search the bitmap for.
  @param[in]   AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out]  BitMap        Pointer to bitmap which covers the Address.
                             Set to NULL if the covering unit does not exist
                             and AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;

  //
  // Adjust current map table depth according to the address to access:
  // while the address has bits above what the current depth can index, add
  // a level. The old root becomes entry 0 of the newly allocated level.
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      //
      // Hook the previous root under entry 0 of the new root.
      //
      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk down the table levels; allocate missing map units on the way only
  // when AllocMapUnit is TRUE, otherwise bail out with a NULL bitmap.
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    //
    // Select the entry in the current level addressed by this level's bits.
    //
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index     &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap  = GuardMap;

  return BitsToUnitEnd;
}
311
312 /**
313 Set corresponding bits in bitmap table to 1 according to given memory range.
314
315 @param[in] Address Memory address to guard from.
316 @param[in] NumberOfPages Number of pages to guard.
317
318 @return VOID
319 **/
320 VOID
321 EFIAPI
322 SetGuardedMemoryBits (
323 IN EFI_PHYSICAL_ADDRESS Address,
324 IN UINTN NumberOfPages
325 )
326 {
327 UINT64 *BitMap;
328 UINTN Bits;
329 UINTN BitsToUnitEnd;
330
331 while (NumberOfPages > 0) {
332 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
333 ASSERT (BitMap != NULL);
334
335 if (NumberOfPages > BitsToUnitEnd) {
336 // Cross map unit
337 Bits = BitsToUnitEnd;
338 } else {
339 Bits = NumberOfPages;
340 }
341
342 SetBits (Address, Bits, BitMap);
343
344 NumberOfPages -= Bits;
345 Address += EFI_PAGES_TO_SIZE (Bits);
346 }
347 }
348
349 /**
350 Clear corresponding bits in bitmap table according to given memory range.
351
352 @param[in] Address Memory address to unset from.
353 @param[in] NumberOfPages Number of pages to unset guard.
354
355 @return VOID
356 **/
357 VOID
358 EFIAPI
359 ClearGuardedMemoryBits (
360 IN EFI_PHYSICAL_ADDRESS Address,
361 IN UINTN NumberOfPages
362 )
363 {
364 UINT64 *BitMap;
365 UINTN Bits;
366 UINTN BitsToUnitEnd;
367
368 while (NumberOfPages > 0) {
369 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
370 ASSERT (BitMap != NULL);
371
372 if (NumberOfPages > BitsToUnitEnd) {
373 // Cross map unit
374 Bits = BitsToUnitEnd;
375 } else {
376 Bits = NumberOfPages;
377 }
378
379 ClearBits (Address, Bits, BitMap);
380
381 NumberOfPages -= Bits;
382 Address += EFI_PAGES_TO_SIZE (Bits);
383 }
384 }
385
386 /**
387 Retrieve corresponding bits in bitmap table according to given memory range.
388
389 @param[in] Address Memory address to retrieve from.
390 @param[in] NumberOfPages Number of pages to retrieve.
391
392 @return An integer containing the guarded memory bitmap.
393 **/
394 UINTN
395 GetGuardedMemoryBits (
396 IN EFI_PHYSICAL_ADDRESS Address,
397 IN UINTN NumberOfPages
398 )
399 {
400 UINT64 *BitMap;
401 UINTN Bits;
402 UINTN Result;
403 UINTN Shift;
404 UINTN BitsToUnitEnd;
405
406 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
407
408 Result = 0;
409 Shift = 0;
410 while (NumberOfPages > 0) {
411 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
412
413 if (NumberOfPages > BitsToUnitEnd) {
414 // Cross map unit
415 Bits = BitsToUnitEnd;
416 } else {
417 Bits = NumberOfPages;
418 }
419
420 if (BitMap != NULL) {
421 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
422 }
423
424 Shift += Bits;
425 NumberOfPages -= Bits;
426 Address += EFI_PAGES_TO_SIZE (Bits);
427 }
428
429 return Result;
430 }
431
432 /**
433 Get bit value in bitmap table for the given address.
434
435 @param[in] Address The address to retrieve for.
436
437 @return 1 or 0.
438 **/
439 UINTN
440 EFIAPI
441 GetGuardMapBit (
442 IN EFI_PHYSICAL_ADDRESS Address
443 )
444 {
445 UINT64 *GuardMap;
446
447 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
448 if (GuardMap != NULL) {
449 if (RShiftU64 (*GuardMap,
450 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
451 return 1;
452 }
453 }
454
455 return 0;
456 }
457
458 /**
459 Set the bit in bitmap table for the given address.
460
461 @param[in] Address The address to set for.
462
463 @return VOID.
464 **/
465 VOID
466 EFIAPI
467 SetGuardMapBit (
468 IN EFI_PHYSICAL_ADDRESS Address
469 )
470 {
471 UINT64 *GuardMap;
472 UINT64 BitMask;
473
474 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
475 if (GuardMap != NULL) {
476 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
477 *GuardMap |= BitMask;
478 }
479 }
480
481 /**
482 Clear the bit in bitmap table for the given address.
483
484 @param[in] Address The address to clear for.
485
486 @return VOID.
487 **/
488 VOID
489 EFIAPI
490 ClearGuardMapBit (
491 IN EFI_PHYSICAL_ADDRESS Address
492 )
493 {
494 UINT64 *GuardMap;
495 UINT64 BitMask;
496
497 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
498 if (GuardMap != NULL) {
499 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
500 *GuardMap &= ~BitMask;
501 }
502 }
503
504 /**
505 Check to see if the page at the given address is a Guard page or not.
506
507 @param[in] Address The address to check for.
508
509 @return TRUE The page at Address is a Guard page.
510 @return FALSE The page at Address is not a Guard page.
511 **/
512 BOOLEAN
513 EFIAPI
514 IsGuardPage (
515 IN EFI_PHYSICAL_ADDRESS Address
516 )
517 {
518 UINTN BitMap;
519
520 //
521 // There must be at least one guarded page before and/or after given
522 // address if it's a Guard page. The bitmap pattern should be one of
523 // 001, 100 and 101
524 //
525 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
526 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
527 }
528
529 /**
530 Check to see if the page at the given address is a head Guard page or not.
531
532 @param[in] Address The address to check for.
533
534 @return TRUE The page at Address is a head Guard page.
535 @return FALSE The page at Address is not a head Guard page.
536 **/
537 BOOLEAN
538 EFIAPI
539 IsHeadGuard (
540 IN EFI_PHYSICAL_ADDRESS Address
541 )
542 {
543 return (GetGuardedMemoryBits (Address, 2) == BIT1);
544 }
545
546 /**
547 Check to see if the page at the given address is a tail Guard page or not.
548
549 @param[in] Address The address to check for.
550
551 @return TRUE The page at Address is a tail Guard page.
552 @return FALSE The page at Address is not a tail Guard page.
553 **/
554 BOOLEAN
555 EFIAPI
556 IsTailGuard (
557 IN EFI_PHYSICAL_ADDRESS Address
558 )
559 {
560 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
561 }
562
563 /**
564 Check to see if the page at the given address is guarded or not.
565
566 @param[in] Address The address to check for.
567
568 @return TRUE The page at Address is guarded.
569 @return FALSE The page at Address is not guarded.
570 **/
571 BOOLEAN
572 EFIAPI
573 IsMemoryGuarded (
574 IN EFI_PHYSICAL_ADDRESS Address
575 )
576 {
577 return (GetGuardMapBit (Address) == 1);
578 }
579
580 /**
581 Set the page at the given address to be a Guard page.
582
583 This is done by changing the page table attribute to be NOT PRSENT.
584
585 @param[in] BaseAddress Page address to Guard at.
586
587 @return VOID.
588 **/
589 VOID
590 EFIAPI
591 SetGuardPage (
592 IN EFI_PHYSICAL_ADDRESS BaseAddress
593 )
594 {
595 if (mSmmMemoryAttribute != NULL) {
596 mOnGuarding = TRUE;
597 mSmmMemoryAttribute->SetMemoryAttributes (
598 mSmmMemoryAttribute,
599 BaseAddress,
600 EFI_PAGE_SIZE,
601 EFI_MEMORY_RP
602 );
603 mOnGuarding = FALSE;
604 }
605 }
606
607 /**
608 Unset the Guard page at the given address to the normal memory.
609
610 This is done by changing the page table attribute to be PRSENT.
611
612 @param[in] BaseAddress Page address to Guard at.
613
614 @return VOID.
615 **/
616 VOID
617 EFIAPI
618 UnsetGuardPage (
619 IN EFI_PHYSICAL_ADDRESS BaseAddress
620 )
621 {
622 if (mSmmMemoryAttribute != NULL) {
623 mOnGuarding = TRUE;
624 mSmmMemoryAttribute->ClearMemoryAttributes (
625 mSmmMemoryAttribute,
626 BaseAddress,
627 EFI_PAGE_SIZE,
628 EFI_MEMORY_RP
629 );
630 mOnGuarding = FALSE;
631 }
632 }
633
634 /**
635 Check to see if the memory at the given address should be guarded or not.
636
637 @param[in] MemoryType Memory type to check.
638 @param[in] AllocateType Allocation type to check.
639 @param[in] PageOrPool Indicate a page allocation or pool allocation.
640
641
642 @return TRUE The given type of memory should be guarded.
643 @return FALSE The given type of memory should not be guarded.
644 **/
645 BOOLEAN
646 IsMemoryTypeToGuard (
647 IN EFI_MEMORY_TYPE MemoryType,
648 IN EFI_ALLOCATE_TYPE AllocateType,
649 IN UINT8 PageOrPool
650 )
651 {
652 UINT64 TestBit;
653 UINT64 ConfigBit;
654
655 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0
656 || mOnGuarding
657 || AllocateType == AllocateAddress) {
658 return FALSE;
659 }
660
661 ConfigBit = 0;
662 if ((PageOrPool & GUARD_HEAP_TYPE_POOL) != 0) {
663 ConfigBit |= PcdGet64 (PcdHeapGuardPoolType);
664 }
665
666 if ((PageOrPool & GUARD_HEAP_TYPE_PAGE) != 0) {
667 ConfigBit |= PcdGet64 (PcdHeapGuardPageType);
668 }
669
670 if (MemoryType == EfiRuntimeServicesData ||
671 MemoryType == EfiRuntimeServicesCode) {
672 TestBit = LShiftU64 (1, MemoryType);
673 } else if (MemoryType == EfiMaxMemoryType) {
674 TestBit = (UINT64)-1;
675 } else {
676 TestBit = 0;
677 }
678
679 return ((ConfigBit & TestBit) != 0);
680 }
681
682 /**
683 Check to see if the pool at the given address should be guarded or not.
684
685 @param[in] MemoryType Pool type to check.
686
687
688 @return TRUE The given type of pool should be guarded.
689 @return FALSE The given type of pool should not be guarded.
690 **/
691 BOOLEAN
692 IsPoolTypeToGuard (
693 IN EFI_MEMORY_TYPE MemoryType
694 )
695 {
696 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
697 GUARD_HEAP_TYPE_POOL);
698 }
699
700 /**
701 Check to see if the page at the given address should be guarded or not.
702
703 @param[in] MemoryType Page type to check.
704 @param[in] AllocateType Allocation type to check.
705
706 @return TRUE The given type of page should be guarded.
707 @return FALSE The given type of page should not be guarded.
708 **/
709 BOOLEAN
710 IsPageTypeToGuard (
711 IN EFI_MEMORY_TYPE MemoryType,
712 IN EFI_ALLOCATE_TYPE AllocateType
713 )
714 {
715 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
716 }
717
718 /**
719 Check to see if the heap guard is enabled for page and/or pool allocation.
720
721 @return TRUE/FALSE.
722 **/
723 BOOLEAN
724 IsHeapGuardEnabled (
725 VOID
726 )
727 {
728 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
729 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
730 }
731
732 /**
733 Set head Guard and tail Guard for the given memory range.
734
735 @param[in] Memory Base address of memory to set guard for.
736 @param[in] NumberOfPages Memory size in pages.
737
738 @return VOID.
739 **/
740 VOID
741 SetGuardForMemory (
742 IN EFI_PHYSICAL_ADDRESS Memory,
743 IN UINTN NumberOfPages
744 )
745 {
746 EFI_PHYSICAL_ADDRESS GuardPage;
747
748 //
749 // Set tail Guard
750 //
751 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
752 if (!IsGuardPage (GuardPage)) {
753 SetGuardPage (GuardPage);
754 }
755
756 // Set head Guard
757 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
758 if (!IsGuardPage (GuardPage)) {
759 SetGuardPage (GuardPage);
760 }
761
762 //
763 // Mark the memory range as Guarded
764 //
765 SetGuardedMemoryBits (Memory, NumberOfPages);
766 }
767
/**
  Unset head Guard and tail Guard for the given memory range.

  A Guard page may be shared with an adjacent allocation, in which case it
  must be kept; and a partial free may require turning an edge page of the
  freed range into a new Guard for the remaining guarded pages.

  @param[in]  Memory         Base address of memory to unset guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
860
861 /**
862 Adjust address of free memory according to existing and/or required Guard.
863
864 This function will check if there're existing Guard pages of adjacent
865 memory blocks, and try to use it as the Guard page of the memory to be
866 allocated.
867
868 @param[in] Start Start address of free memory block.
869 @param[in] Size Size of free memory block.
870 @param[in] SizeRequested Size of memory to allocate.
871
872 @return The end address of memory block found.
873 @return 0 if no enough space for the required size of memory and its Guard.
874 **/
875 UINT64
876 AdjustMemoryS (
877 IN UINT64 Start,
878 IN UINT64 Size,
879 IN UINT64 SizeRequested
880 )
881 {
882 UINT64 Target;
883
884 //
885 // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
886 // indicated to put the pool near the Tail Guard, we need extra bytes to
887 // make sure alignment of the returned pool address.
888 //
889 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
890 SizeRequested = ALIGN_VALUE(SizeRequested, 8);
891 }
892
893 Target = Start + Size - SizeRequested;
894 ASSERT (Target >= Start);
895 if (Target == 0) {
896 return 0;
897 }
898
899 if (!IsGuardPage (Start + Size)) {
900 // No Guard at tail to share. One more page is needed.
901 Target -= EFI_PAGES_TO_SIZE (1);
902 }
903
904 // Out of range?
905 if (Target < Start) {
906 return 0;
907 }
908
909 // At the edge?
910 if (Target == Start) {
911 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
912 // No enough space for a new head Guard if no Guard at head to share.
913 return 0;
914 }
915 }
916
917 // OK, we have enough pages for memory and its Guards. Return the End of the
918 // free space.
919 return Target + SizeRequested - 1;
920 }
921
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory         Base address of memory to free.
  @param[in,out]  NumberOfPages  Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
1023
1024 /**
1025 Adjust the base and number of pages to really allocate according to Guard.
1026
1027 @param[in,out] Memory Base address of free memory.
1028 @param[in,out] NumberOfPages Size of memory to allocate.
1029
1030 @return VOID.
1031 **/
1032 VOID
1033 AdjustMemoryA (
1034 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
1035 IN OUT UINTN *NumberOfPages
1036 )
1037 {
1038 //
1039 // FindFreePages() has already taken the Guard into account. It's safe to
1040 // adjust the start address and/or number of pages here, to make sure that
1041 // the Guards are also "allocated".
1042 //
1043 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
1044 // No tail Guard, add one.
1045 *NumberOfPages += 1;
1046 }
1047
1048 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
1049 // No head Guard, add one.
1050 *Memory -= EFI_PAGE_SIZE;
1051 *NumberOfPages += 1;
1052 }
1053 }
1054
1055 /**
1056 Adjust the pool head position to make sure the Guard page is adjavent to
1057 pool tail or pool head.
1058
1059 @param[in] Memory Base address of memory allocated.
1060 @param[in] NoPages Number of pages actually allocated.
1061 @param[in] Size Size of memory requested.
1062 (plus pool head/tail overhead)
1063
1064 @return Address of pool head
1065 **/
1066 VOID *
1067 AdjustPoolHeadA (
1068 IN EFI_PHYSICAL_ADDRESS Memory,
1069 IN UINTN NoPages,
1070 IN UINTN Size
1071 )
1072 {
1073 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1074 //
1075 // Pool head is put near the head Guard
1076 //
1077 return (VOID *)(UINTN)Memory;
1078 }
1079
1080 //
1081 // Pool head is put near the tail Guard
1082 //
1083 Size = ALIGN_VALUE (Size, 8);
1084 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1085 }
1086
1087 /**
1088 Get the page base address according to pool head address.
1089
1090 @param[in] Memory Head address of pool to free.
1091
1092 @return Address of pool head.
1093 **/
1094 VOID *
1095 AdjustPoolHeadF (
1096 IN EFI_PHYSICAL_ADDRESS Memory
1097 )
1098 {
1099 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1100 //
1101 // Pool head is put near the head Guard
1102 //
1103 return (VOID *)(UINTN)Memory;
1104 }
1105
1106 //
1107 // Pool head is put near the tail Guard
1108 //
1109 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1110 }
1111
/**
  Helper function of memory allocation with Guard pages.

  Scans the free page list (from the highest-address node downwards) for a
  block that can hold the requested pages plus any Guard pages that cannot
  be shared with adjacent allocations.

  @param  FreePageList   The free page node.
  @param  NumberOfPages  Number of pages to be allocated.
  @param  MaxAddress     Request to allocate memory below this address.
  @param  MemoryType     Type of memory requested.

  @return Memory address of allocated pages, or (UINTN)(-1) on failure.
**/
UINTN
InternalAllocMaxAddressWithGuard (
  IN OUT LIST_ENTRY           *FreePageList,
  IN     UINTN                NumberOfPages,
  IN     UINTN                MaxAddress,
  IN     EFI_MEMORY_TYPE      MemoryType

  )
{
  LIST_ENTRY      *Node;
  FREE_PAGE_LIST  *Pages;
  UINTN           PagesToAlloc;
  UINTN           HeadGuard;
  UINTN           TailGuard;
  UINTN           Address;

  for (Node = FreePageList->BackLink; Node != FreePageList;
       Node = Node->BackLink) {
    Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
    if (Pages->NumberOfPages >= NumberOfPages &&
        (UINTN)Pages + EFI_PAGES_TO_SIZE (NumberOfPages) - 1 <= MaxAddress) {

      //
      // We may need 1 or 2 more pages for Guard. Check it out.
      // A Guard slot of 0 below means "no existing Guard to share" and an
      // extra page must be taken from this free block for it.
      //
      PagesToAlloc = NumberOfPages;
      TailGuard = (UINTN)Pages + EFI_PAGES_TO_SIZE (Pages->NumberOfPages);
      if (!IsGuardPage (TailGuard)) {
        //
        // Add one if no Guard at the end of current free memory block.
        //
        PagesToAlloc += 1;
        TailGuard = 0;
      }

      HeadGuard = (UINTN)Pages +
                  EFI_PAGES_TO_SIZE (Pages->NumberOfPages - PagesToAlloc) -
                  EFI_PAGE_SIZE;
      if (!IsGuardPage (HeadGuard)) {
        //
        // Add one if no Guard at the page before the address to allocate
        //
        PagesToAlloc += 1;
        HeadGuard = 0;
      }

      if (Pages->NumberOfPages < PagesToAlloc) {
        // Not enough space to allocate memory with Guards? Try next block.
        continue;
      }

      Address = InternalAllocPagesOnOneNode (Pages, PagesToAlloc, MaxAddress);
      ConvertSmmMemoryMapEntry(MemoryType, Address, PagesToAlloc, FALSE);
      CoreFreeMemoryMapStack();
      if (HeadGuard == 0) {
        // Don't pass the Guard page to user.
        Address += EFI_PAGE_SIZE;
      }
      SetGuardForMemory (Address, NumberOfPages);
      return Address;
    }
  }

  return (UINTN)(-1);
}
1187
1188 /**
1189 Helper function of memory free with Guard pages.
1190
1191 @param[in] Memory Base address of memory being freed.
1192 @param[in] NumberOfPages The number of pages to free.
1193 @param[in] AddRegion If this memory is new added region.
1194
1195 @retval EFI_NOT_FOUND Could not find the entry that covers the range.
1196 @retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or NumberOfPages is zero.
1197 @return EFI_SUCCESS Pages successfully freed.
1198 **/
1199 EFI_STATUS
1200 SmmInternalFreePagesExWithGuard (
1201 IN EFI_PHYSICAL_ADDRESS Memory,
1202 IN UINTN NumberOfPages,
1203 IN BOOLEAN AddRegion
1204 )
1205 {
1206 EFI_PHYSICAL_ADDRESS MemoryToFree;
1207 UINTN PagesToFree;
1208
1209 if (((Memory & EFI_PAGE_MASK) != 0) || (Memory == 0) || (NumberOfPages == 0)) {
1210 return EFI_INVALID_PARAMETER;
1211 }
1212
1213 MemoryToFree = Memory;
1214 PagesToFree = NumberOfPages;
1215
1216 AdjustMemoryF (&MemoryToFree, &PagesToFree);
1217 UnsetGuardForMemory (Memory, NumberOfPages);
1218 if (PagesToFree == 0) {
1219 return EFI_SUCCESS;
1220 }
1221
1222 return SmmInternalFreePagesEx (MemoryToFree, PagesToFree, AddRegion);
1223 }
1224
/**
  Set all Guard pages which cannot be set during the non-SMM mode time.

  Walks the multi-level guarded-memory bitmap rooted at mGuardedMemoryMap
  down to its leaf tables, in which each bit tracks one page ('1' means the
  page is guarded memory; see the comment at the top of this file). Guard
  pages sit at the boundaries of each run of '1' bits: a 0->1 transition
  means the page just below the run is a head Guard, and a 1->0 transition
  means the current page is a tail Guard. Each Guard page found is applied
  via SetGuardPage ().
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     Index;
  BOOLEAN   OnGuarding;

  //
  // Nothing to do if the bitmap has not been initialized or the recorded
  // depth is out of range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Cache the per-level index masks and shifts locally. Entries[] holds each
  // level's index mask, i.e. the maximum valid index into that level's table.
  //
  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // Start the walk at the topmost level currently populated; mMapLevel
  // levels are in use, counted from the bottom of the table hierarchy.
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;    // TRUE while scanning inside a run of '1' bits

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
    );

  //
  // Iterative depth-first traversal; Tables[]/Addresses[]/Indices[] serve as
  // the explicit per-level stack.
  //
  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table is exhausted; pop back up one level.
      //
      Tables[Level] = 0;
      Level        -= 1;
    } else {

      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {

        //
        // Entire sub-tree (or leaf word) is unguarded; any pending run of
        // guarded pages ends here.
        //
        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Non-leaf entry: descend into the child table it points to.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;

      } else {

        //
        // Leaf bitmap word: scan it bit by bit, one page per bit.
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              //
              // 0->1 transition: the page just below the run start is the
              // head Guard.
              //
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              //
              // 1->0 transition: this page is the tail Guard.
              //
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          //
          // The rest of the word is all zero; no further Guard boundaries
          // can occur in it.
          //
          if (TableEntry == 0) {
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address += EFI_PAGE_SIZE;
          Index += 1;
        }
      }
    }

    //
    // Stop once the walk has popped above the topmost populated level.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at the current level and recompute the base
    // address that entry covers.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1334
1335 /**
1336 Hook function used to set all Guard pages after entering SMM mode.
1337 **/
1338 VOID
1339 SmmEntryPointMemoryManagementHook (
1340 VOID
1341 )
1342 {
1343 EFI_STATUS Status;
1344
1345 if (mSmmMemoryAttribute == NULL) {
1346 Status = SmmLocateProtocol (
1347 &gEdkiiSmmMemoryAttributeProtocolGuid,
1348 NULL,
1349 (VOID **)&mSmmMemoryAttribute
1350 );
1351 if (!EFI_ERROR(Status)) {
1352 SetAllGuardPages ();
1353 }
1354 }
1355 }
1356
1357 /**
1358 Helper function to convert a UINT64 value in binary to a string.
1359
1360 @param[in] Value Value of a UINT64 integer.
1361 @param[out] BinString String buffer to contain the conversion result.
1362
1363 @return VOID.
1364 **/
1365 VOID
1366 Uint64ToBinString (
1367 IN UINT64 Value,
1368 OUT CHAR8 *BinString
1369 )
1370 {
1371 UINTN Index;
1372
1373 if (BinString == NULL) {
1374 return;
1375 }
1376
1377 for (Index = 64; Index > 0; --Index) {
1378 BinString[Index - 1] = '0' + (Value & 1);
1379 Value = RShiftU64 (Value, 1);
1380 }
1381 BinString[64] = '\0';
1382 }
1383
/**
  Dump the guarded memory bit map.

  Performs the same iterative depth-first walk of the multi-level bitmap as
  SetAllGuardPages (), printing each leaf bitmap word (64 pages per line) in
  binary, prefixed by the start address the word covers. Consecutive all-zero
  lines are collapsed: the first is printed in full, the second is shown as
  "...", and further ones are suppressed (tracked via RepeatZero).
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  //
  // Nothing to dump if the bitmap has not been initialized or the recorded
  // depth is out of range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Column rulers: Ruler2 labels each bit position (nibble digits), Ruler1
  // labels each 16-bit group.
  //
  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  //
  // Cache the per-level index masks and shifts locally. Entries[] holds each
  // level's index mask, i.e. the maximum valid index into that level's table.
  //
  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof(Indices), 0);
  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);

  //
  // Start the walk at the topmost level currently populated.
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  //
  // Iterative depth-first traversal; Tables[]/Addresses[]/Indices[] serve as
  // the explicit per-level stack.
  //
  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {

      //
      // Current table is exhausted; pop back up one level and print a
      // separator between tables.
      //
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {

        //
        // All-zero leaf word: print the first one, show "..." for the
        // second, and skip the rest of the run.
        //
        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          if (RepeatZero == 0) {
            Uint64ToBinString(TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Non-leaf entry: descend into the child table it points to.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;
        RepeatZero        = 0;

        continue;

      } else {

        //
        // Non-zero leaf word: print it in binary and reset the zero-run
        // counter.
        //
        RepeatZero = 0;
        Uint64ToBinString(TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    //
    // Stop once the walk has popped above the topmost populated level.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at the current level and recompute the base
    // address that entry covers.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1492
1493 /**
1494 Debug function used to verify if the Guard page is well set or not.
1495
1496 @param[in] BaseAddress Address of memory to check.
1497 @param[in] NumberOfPages Size of memory in pages.
1498
1499 @return TRUE The head Guard and tail Guard are both well set.
1500 @return FALSE The head Guard and/or tail Guard are not well set.
1501 **/
1502 BOOLEAN
1503 VerifyMemoryGuard (
1504 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1505 IN UINTN NumberOfPages
1506 )
1507 {
1508 EFI_STATUS Status;
1509 UINT64 Attribute;
1510 EFI_PHYSICAL_ADDRESS Address;
1511
1512 if (mSmmMemoryAttribute == NULL) {
1513 return TRUE;
1514 }
1515
1516 Attribute = 0;
1517 Address = BaseAddress - EFI_PAGE_SIZE;
1518 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1519 mSmmMemoryAttribute,
1520 Address,
1521 EFI_PAGE_SIZE,
1522 &Attribute
1523 );
1524 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
1525 DEBUG ((DEBUG_ERROR, "Head Guard is not set at: %016lx (%016lX)!!!\r\n",
1526 Address, Attribute));
1527 DumpGuardedMemoryBitmap ();
1528 return FALSE;
1529 }
1530
1531 Attribute = 0;
1532 Address = BaseAddress + EFI_PAGES_TO_SIZE (NumberOfPages);
1533 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1534 mSmmMemoryAttribute,
1535 Address,
1536 EFI_PAGE_SIZE,
1537 &Attribute
1538 );
1539 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
1540 DEBUG ((DEBUG_ERROR, "Tail Guard is not set at: %016lx (%016lX)!!!\r\n",
1541 Address, Attribute));
1542 DumpGuardedMemoryBitmap ();
1543 return FALSE;
1544 }
1545
1546 return TRUE;
1547 }
1548