MdeModulePkg/Core/PiSmmCore/HeapGuard.c
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "HeapGuard.h"
16
17 //
18 // Global to avoid infinite reentrance of memory allocation when updating
19 // page table attributes, which may require allocating pages for new PDE/PTE.
20 //
21 GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
22
23 //
24 // Pointer to the table tracking Guarded memory with a bitmap, in which '1'
25 // indicates guarded memory and '0' means either free memory or a Guard page
26 // itself, depending on the status of the adjacent memory.
27 //
28 GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
29
30 //
31 // Current depth of the map table pointed to by mGuardedMemoryMap.
32 // mMapLevel must be initialized to at least 1. It is updated automatically
33 // according to the address of the memory just tracked.
34 //
35 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
36
37 //
38 // Shift and mask for each level of map table
39 //
40 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
41 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
42 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
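//
// Note: the actual shift/mask values are defined in HeapGuard.h. For a given
// Address, the index into the table at each level is computed as
// (Address >> mLevelShift[Level]) & mLevelMask[Level], as done in
// FindGuardedMemoryMap() below.
//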
44
45 //
46 // SMM memory attribute protocol
47 //
48 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *mSmmMemoryAttribute = NULL;
49
50 /**
51 Set corresponding bits in bitmap table to 1 according to the address.
52
53 @param[in] Address Start address to set for.
54 @param[in] BitNumber Number of bits to set.
55 @param[in] BitMap Pointer to bitmap which covers the Address.
56
57 @return VOID
58 **/
59 STATIC
60 VOID
61 SetBits (
62 IN EFI_PHYSICAL_ADDRESS Address,
63 IN UINTN BitNumber,
64 IN UINT64 *BitMap
65 )
66 {
67 UINTN Lsbs;
68 UINTN Qwords;
69 UINTN Msbs;
70 UINTN StartBit;
71 UINTN EndBit;
72
73 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
74 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
75
76 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
77 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
78 GUARDED_HEAP_MAP_ENTRY_BITS;
79 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
80 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
81 } else {
82 Msbs = BitNumber;
83 Lsbs = 0;
84 Qwords = 0;
85 }
86
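//
// Worked example: StartBit = 60 and BitNumber = 10 give Msbs = 4 (bits
// 60..63 of the first qword), Qwords = 0 and Lsbs = 6 (bits 0..5 of the
// following qword).
//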
87 if (Msbs > 0) {
88 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
89 BitMap += 1;
90 }
91
92 if (Qwords > 0) {
93 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
94 (UINT64)-1);
95 BitMap += Qwords;
96 }
97
98 if (Lsbs > 0) {
99 *BitMap |= (LShiftU64 (1, Lsbs) - 1);
100 }
101 }
102
103 /**
104 Set corresponding bits in bitmap table to 0 according to the address.
105
106 @param[in] Address Start address to set for.
107 @param[in] BitNumber Number of bits to set.
108 @param[in] BitMap Pointer to bitmap which covers the Address.
109
110 @return VOID.
111 **/
112 STATIC
113 VOID
114 ClearBits (
115 IN EFI_PHYSICAL_ADDRESS Address,
116 IN UINTN BitNumber,
117 IN UINT64 *BitMap
118 )
119 {
120 UINTN Lsbs;
121 UINTN Qwords;
122 UINTN Msbs;
123 UINTN StartBit;
124 UINTN EndBit;
125
126 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
127 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
128
129 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
130 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
131 GUARDED_HEAP_MAP_ENTRY_BITS;
132 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
133 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
134 } else {
135 Msbs = BitNumber;
136 Lsbs = 0;
137 Qwords = 0;
138 }
139
140 if (Msbs > 0) {
141 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
142 BitMap += 1;
143 }
144
145 if (Qwords > 0) {
146 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
147 BitMap += Qwords;
148 }
149
150 if (Lsbs > 0) {
151 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
152 }
153 }
154
155 /**
156 Get corresponding bits in bitmap table according to the address.
157
158 The value of bit 0 corresponds to the status of memory at given Address.
159 No more than 64 bits can be retrieved in one call.
160
161 @param[in] Address Start address to retrieve bits for.
162 @param[in] BitNumber Number of bits to get.
163 @param[in] BitMap Pointer to bitmap which covers the Address.
164
165 @return An integer containing the bits information.
166 **/
167 STATIC
168 UINT64
169 GetBits (
170 IN EFI_PHYSICAL_ADDRESS Address,
171 IN UINTN BitNumber,
172 IN UINT64 *BitMap
173 )
174 {
175 UINTN StartBit;
176 UINTN EndBit;
177 UINTN Lsbs;
178 UINTN Msbs;
179 UINT64 Result;
180
181 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
182
183 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
184 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
185
186 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
187 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
188 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
189 } else {
190 Msbs = BitNumber;
191 Lsbs = 0;
192 }
193
194 Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
195 if (Lsbs > 0) {
196 BitMap += 1;
197 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
198 }
199
200 return Result;
201 }
202
203 /**
204 Helper function to allocate pages without Guard for internal uses.
205
206 @param[in] Pages Page number.
207
208 @return Address of memory allocated.
209 **/
210 VOID *
211 PageAlloc (
212 IN UINTN Pages
213 )
214 {
215 EFI_STATUS Status;
216 EFI_PHYSICAL_ADDRESS Memory;
217
218 Status = SmmInternalAllocatePages (AllocateAnyPages, EfiRuntimeServicesData,
219 Pages, &Memory, FALSE);
220 if (EFI_ERROR (Status)) {
221 Memory = 0;
222 }
223
224 return (VOID *)(UINTN)Memory;
225 }
226
227 /**
228 Locate the pointer of bitmap from the guarded memory bitmap tables, which
229 covers the given Address.
230
231 @param[in] Address Start address to search the bitmap for.
232 @param[in] AllocMapUnit Flag to indicate whether missing map table units should be allocated.
233 @param[out] BitMap Pointer to bitmap which covers the Address.
234
235 @return The bit number from given Address to the end of current map table.
236 **/
237 UINTN
238 FindGuardedMemoryMap (
239 IN EFI_PHYSICAL_ADDRESS Address,
240 IN BOOLEAN AllocMapUnit,
241 OUT UINT64 **BitMap
242 )
243 {
244 UINTN Level;
245 UINT64 *GuardMap;
246 UINT64 MapMemory;
247 UINTN Index;
248 UINTN Size;
249 UINTN BitsToUnitEnd;
250
251 //
252 // Adjust current map table depth according to the address to access
253 //
254 while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH
255 &&
256 RShiftU64 (
257 Address,
258 mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
259 ) != 0) {
260
261 if (mGuardedMemoryMap != 0) {
262 Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
263 * GUARDED_HEAP_MAP_ENTRY_BYTES;
264 MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
265 ASSERT (MapMemory != 0);
266
267 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
268
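//
// The old map root covers the lower addresses, so it becomes entry 0 of the
// newly allocated, higher-level table.
//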
269 *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
270 mGuardedMemoryMap = MapMemory;
271 }
272
273 mMapLevel++;
274
275 }
276
277 GuardMap = &mGuardedMemoryMap;
278 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
279 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
280 ++Level) {
281
282 if (*GuardMap == 0) {
283 if (!AllocMapUnit) {
284 GuardMap = NULL;
285 break;
286 }
287
288 Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
289 MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
290 ASSERT (MapMemory != 0);
291
292 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
293 *GuardMap = MapMemory;
294 }
295
296 Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
297 Index &= mLevelMask[Level];
298 GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
299
300 }
301
302 BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
303 *BitMap = GuardMap;
304
305 return BitsToUnitEnd;
306 }
307
308 /**
309 Set corresponding bits in bitmap table to 1 according to given memory range.
310
311 @param[in] Address Memory address to guard from.
312 @param[in] NumberOfPages Number of pages to guard.
313
314 @return VOID
315 **/
316 VOID
317 EFIAPI
318 SetGuardedMemoryBits (
319 IN EFI_PHYSICAL_ADDRESS Address,
320 IN UINTN NumberOfPages
321 )
322 {
323 UINT64 *BitMap;
324 UINTN Bits;
325 UINTN BitsToUnitEnd;
326
327 while (NumberOfPages > 0) {
328 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
329 ASSERT (BitMap != NULL);
330
331 if (NumberOfPages > BitsToUnitEnd) {
332 // Cross map unit
333 Bits = BitsToUnitEnd;
334 } else {
335 Bits = NumberOfPages;
336 }
337
338 SetBits (Address, Bits, BitMap);
339
340 NumberOfPages -= Bits;
341 Address += EFI_PAGES_TO_SIZE (Bits);
342 }
343 }
344
345 /**
346 Clear corresponding bits in bitmap table according to given memory range.
347
348 @param[in] Address Memory address to unset from.
349 @param[in] NumberOfPages Number of pages to unset guard.
350
351 @return VOID
352 **/
353 VOID
354 EFIAPI
355 ClearGuardedMemoryBits (
356 IN EFI_PHYSICAL_ADDRESS Address,
357 IN UINTN NumberOfPages
358 )
359 {
360 UINT64 *BitMap;
361 UINTN Bits;
362 UINTN BitsToUnitEnd;
363
364 while (NumberOfPages > 0) {
365 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
366 ASSERT (BitMap != NULL);
367
368 if (NumberOfPages > BitsToUnitEnd) {
369 // Cross map unit
370 Bits = BitsToUnitEnd;
371 } else {
372 Bits = NumberOfPages;
373 }
374
375 ClearBits (Address, Bits, BitMap);
376
377 NumberOfPages -= Bits;
378 Address += EFI_PAGES_TO_SIZE (Bits);
379 }
380 }
381
382 /**
383 Retrieve corresponding bits in bitmap table according to given memory range.
384
385 @param[in] Address Memory address to retrieve from.
386 @param[in] NumberOfPages Number of pages to retrieve.
387
388 @return An integer containing the guarded memory bitmap.
389 **/
390 UINTN
391 GetGuardedMemoryBits (
392 IN EFI_PHYSICAL_ADDRESS Address,
393 IN UINTN NumberOfPages
394 )
395 {
396 UINT64 *BitMap;
397 UINTN Bits;
398 UINTN Result;
399 UINTN Shift;
400 UINTN BitsToUnitEnd;
401
402 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
403
404 Result = 0;
405 Shift = 0;
406 while (NumberOfPages > 0) {
407 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
408
409 if (NumberOfPages > BitsToUnitEnd) {
410 // Cross map unit
411 Bits = BitsToUnitEnd;
412 } else {
413 Bits = NumberOfPages;
414 }
415
416 if (BitMap != NULL) {
417 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
418 }
419
420 Shift += Bits;
421 NumberOfPages -= Bits;
422 Address += EFI_PAGES_TO_SIZE (Bits);
423 }
424
425 return Result;
426 }
427
428 /**
429 Get bit value in bitmap table for the given address.
430
431 @param[in] Address The address to retrieve for.
432
433 @return 1 or 0.
434 **/
435 UINTN
436 EFIAPI
437 GetGuardMapBit (
438 IN EFI_PHYSICAL_ADDRESS Address
439 )
440 {
441 UINT64 *GuardMap;
442
443 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
444 if (GuardMap != NULL) {
445 if (RShiftU64 (*GuardMap,
446 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
447 return 1;
448 }
449 }
450
451 return 0;
452 }
453
454 /**
455 Set the bit in bitmap table for the given address.
456
457 @param[in] Address The address to set for.
458
459 @return VOID.
460 **/
461 VOID
462 EFIAPI
463 SetGuardMapBit (
464 IN EFI_PHYSICAL_ADDRESS Address
465 )
466 {
467 UINT64 *GuardMap;
468 UINT64 BitMask;
469
470 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
471 if (GuardMap != NULL) {
472 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
473 *GuardMap |= BitMask;
474 }
475 }
476
477 /**
478 Clear the bit in bitmap table for the given address.
479
480 @param[in] Address The address to clear for.
481
482 @return VOID.
483 **/
484 VOID
485 EFIAPI
486 ClearGuardMapBit (
487 IN EFI_PHYSICAL_ADDRESS Address
488 )
489 {
490 UINT64 *GuardMap;
491 UINT64 BitMask;
492
493 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
494 if (GuardMap != NULL) {
495 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
496 *GuardMap &= ~BitMask;
497 }
498 }
499
500 /**
501 Check to see if the page at the given address is a Guard page or not.
502
503 @param[in] Address The address to check for.
504
505 @return TRUE The page at Address is a Guard page.
506 @return FALSE The page at Address is not a Guard page.
507 **/
508 BOOLEAN
509 EFIAPI
510 IsGuardPage (
511 IN EFI_PHYSICAL_ADDRESS Address
512 )
513 {
514 UINTN BitMap;
515
516 //
517 // There must be at least one guarded page before and/or after given
518 // address if it's a Guard page. The bitmap pattern should be one of
519 // 001, 100 and 101
520 //
521 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
522 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
523 }
524
525 /**
526 Check to see if the page at the given address is a head Guard page or not.
527
528 @param[in] Address The address to check for.
529
530 @return TRUE The page at Address is a head Guard page.
531 @return FALSE The page at Address is not a head Guard page.
532 **/
533 BOOLEAN
534 EFIAPI
535 IsHeadGuard (
536 IN EFI_PHYSICAL_ADDRESS Address
537 )
538 {
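//
// A head Guard page is itself unmarked (bit 0 == 0) while the page right
// after it is marked guarded (bit 1 == 1), i.e. the two-bit pattern BIT1.
//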
539 return (GetGuardedMemoryBits (Address, 2) == BIT1);
540 }
541
542 /**
543 Check to see if the page at the given address is a tail Guard page or not.
544
545 @param[in] Address The address to check for.
546
547 @return TRUE The page at Address is a tail Guard page.
548 @return FALSE The page at Address is not a tail Guard page.
549 **/
550 BOOLEAN
551 EFIAPI
552 IsTailGuard (
553 IN EFI_PHYSICAL_ADDRESS Address
554 )
555 {
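//
// A tail Guard page is itself unmarked while the page right before it is
// marked guarded, i.e. the two-bit pattern starting one page lower is BIT0.
//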
556 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
557 }
558
559 /**
560 Check to see if the page at the given address is guarded or not.
561
562 @param[in] Address The address to check for.
563
564 @return TRUE The page at Address is guarded.
565 @return FALSE The page at Address is not guarded.
566 **/
567 BOOLEAN
568 EFIAPI
569 IsMemoryGuarded (
570 IN EFI_PHYSICAL_ADDRESS Address
571 )
572 {
573 return (GetGuardMapBit (Address) == 1);
574 }
575
576 /**
577 Set the page at the given address to be a Guard page.
578
579 This is done by changing the page table attribute to be NOT PRESENT.
580
581 @param[in] BaseAddress Page address to Guard at.
582
583 @return VOID.
584 **/
585 VOID
586 EFIAPI
587 SetGuardPage (
588 IN EFI_PHYSICAL_ADDRESS BaseAddress
589 )
590 {
591 if (mSmmMemoryAttribute != NULL) {
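//
// mOnGuarding suppresses guarding of any pages that SetMemoryAttributes()
// itself may allocate for page table updates (see IsMemoryTypeToGuard()).
//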
592 mOnGuarding = TRUE;
593 mSmmMemoryAttribute->SetMemoryAttributes (
594 mSmmMemoryAttribute,
595 BaseAddress,
596 EFI_PAGE_SIZE,
597 EFI_MEMORY_RP
598 );
599 mOnGuarding = FALSE;
600 }
601 }
602
603 /**
604 Unset the Guard page at the given address back to normal memory.
605
606 This is done by changing the page table attribute to be PRESENT.
607
608 @param[in] BaseAddress Base address of the Guard page to unset.
609
610 @return VOID.
611 **/
612 VOID
613 EFIAPI
614 UnsetGuardPage (
615 IN EFI_PHYSICAL_ADDRESS BaseAddress
616 )
617 {
618 if (mSmmMemoryAttribute != NULL) {
619 mOnGuarding = TRUE;
620 mSmmMemoryAttribute->ClearMemoryAttributes (
621 mSmmMemoryAttribute,
622 BaseAddress,
623 EFI_PAGE_SIZE,
624 EFI_MEMORY_RP
625 );
626 mOnGuarding = FALSE;
627 }
628 }
629
630 /**
631 Check to see if the memory at the given address should be guarded or not.
632
633 @param[in] MemoryType Memory type to check.
634 @param[in] AllocateType Allocation type to check.
635 @param[in] PageOrPool Indicate a page allocation or pool allocation.
636
637
638 @return TRUE The given type of memory should be guarded.
639 @return FALSE The given type of memory should not be guarded.
640 **/
641 BOOLEAN
642 IsMemoryTypeToGuard (
643 IN EFI_MEMORY_TYPE MemoryType,
644 IN EFI_ALLOCATE_TYPE AllocateType,
645 IN UINT8 PageOrPool
646 )
647 {
648 UINT64 TestBit;
649 UINT64 ConfigBit;
650
651 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0
652 || mOnGuarding
653 || AllocateType == AllocateAddress) {
654 return FALSE;
655 }
656
657 ConfigBit = 0;
658 if ((PageOrPool & GUARD_HEAP_TYPE_POOL) != 0) {
659 ConfigBit |= PcdGet64 (PcdHeapGuardPoolType);
660 }
661
662 if ((PageOrPool & GUARD_HEAP_TYPE_PAGE) != 0) {
663 ConfigBit |= PcdGet64 (PcdHeapGuardPageType);
664 }
665
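//
// EfiMaxMemoryType acts as a wildcard (see IsHeapGuardEnabled()): any memory
// type bit set in the configuration PCDs then counts as a match.
//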
666 if (MemoryType == EfiRuntimeServicesData ||
667 MemoryType == EfiRuntimeServicesCode) {
668 TestBit = LShiftU64 (1, MemoryType);
669 } else if (MemoryType == EfiMaxMemoryType) {
670 TestBit = (UINT64)-1;
671 } else {
672 TestBit = 0;
673 }
674
675 return ((ConfigBit & TestBit) != 0);
676 }
677
678 /**
679 Check to see if the pool at the given address should be guarded or not.
680
681 @param[in] MemoryType Pool type to check.
682
683
684 @return TRUE The given type of pool should be guarded.
685 @return FALSE The given type of pool should not be guarded.
686 **/
687 BOOLEAN
688 IsPoolTypeToGuard (
689 IN EFI_MEMORY_TYPE MemoryType
690 )
691 {
692 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
693 GUARD_HEAP_TYPE_POOL);
694 }
695
696 /**
697 Check to see if the page at the given address should be guarded or not.
698
699 @param[in] MemoryType Page type to check.
700 @param[in] AllocateType Allocation type to check.
701
702 @return TRUE The given type of page should be guarded.
703 @return FALSE The given type of page should not be guarded.
704 **/
705 BOOLEAN
706 IsPageTypeToGuard (
707 IN EFI_MEMORY_TYPE MemoryType,
708 IN EFI_ALLOCATE_TYPE AllocateType
709 )
710 {
711 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
712 }
713
714 /**
715 Check to see if the heap guard is enabled for page and/or pool allocation.
716
717 @return TRUE if heap guard is enabled for page and/or pool allocation; FALSE otherwise.
718 **/
719 BOOLEAN
720 IsHeapGuardEnabled (
721 VOID
722 )
723 {
724 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
725 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
726 }
727
728 /**
729 Set head Guard and tail Guard for the given memory range.
730
731 @param[in] Memory Base address of memory to set guard for.
732 @param[in] NumberOfPages Memory size in pages.
733
734 @return VOID.
735 **/
736 VOID
737 SetGuardForMemory (
738 IN EFI_PHYSICAL_ADDRESS Memory,
739 IN UINTN NumberOfPages
740 )
741 {
742 EFI_PHYSICAL_ADDRESS GuardPage;
743
744 //
745 // Set tail Guard
746 //
747 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
748 if (!IsGuardPage (GuardPage)) {
749 SetGuardPage (GuardPage);
750 }
751
752 // Set head Guard
753 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
754 if (!IsGuardPage (GuardPage)) {
755 SetGuardPage (GuardPage);
756 }
757
758 //
759 // Mark the memory range as Guarded
760 //
761 SetGuardedMemoryBits (Memory, NumberOfPages);
762 }
763
764 /**
765 Unset head Guard and tail Guard for the given memory range.
766
767 @param[in] Memory Base address of memory to unset guard for.
768 @param[in] NumberOfPages Memory size in pages.
769
770 @return VOID.
771 **/
772 VOID
773 UnsetGuardForMemory (
774 IN EFI_PHYSICAL_ADDRESS Memory,
775 IN UINTN NumberOfPages
776 )
777 {
778 EFI_PHYSICAL_ADDRESS GuardPage;
779 UINT64 GuardBitmap;
780
781 if (NumberOfPages == 0) {
782 return;
783 }
784
785 //
786 // Head Guard must be one page before, if any.
787 //
788 // MSB-> 1 0 <-LSB
789 // -------------------
790 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
791 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
792 // 1 X -> Don't free first page (need a new Guard)
793 // (it'll be turned into a Guard page later)
794 // -------------------
795 // Start -> -1 -2
796 //
797 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
798 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
799 if ((GuardBitmap & BIT1) == 0) {
800 //
801 // Head Guard exists.
802 //
803 if ((GuardBitmap & BIT0) == 0) {
804 //
805 // If the head Guard is not a tail Guard of adjacent memory block,
806 // unset it.
807 //
808 UnsetGuardPage (GuardPage);
809 }
810 } else {
811 //
812 // Pages before memory to free are still in Guard. It's a partial free
813 // case. Turn first page of memory block to free into a new Guard.
814 //
815 SetGuardPage (Memory);
816 }
817
818 //
819 // Tail Guard must be the page after this memory block to free, if any.
820 //
821 // MSB-> 1 0 <-LSB
822 // --------------------
823 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
824 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
825 // X 1 -> Don't free last page (need a new Guard)
826 // (it'll be turned into a Guard page later)
827 // --------------------
828 // +1 +0 <- End
829 //
830 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
831 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
832 if ((GuardBitmap & BIT0) == 0) {
833 //
834 // Tail Guard exists.
835 //
836 if ((GuardBitmap & BIT1) == 0) {
837 //
838 // If the tail Guard is not a head Guard of adjacent memory block,
839 // free it; otherwise, keep it.
840 //
841 UnsetGuardPage (GuardPage);
842 }
843 } else {
844 //
845 // Pages after memory to free are still in Guard. It's a partial free
846 // case. We need to keep one page to be a head Guard.
847 //
848 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
849 }
850
851 //
852 // No matter what, we just clear the mark of the Guarded memory.
853 //
854 ClearGuardedMemoryBits(Memory, NumberOfPages);
855 }
856
857 /**
858 Adjust address of free memory according to existing and/or required Guard.
859
860 This function checks whether there are existing Guard pages of adjacent
861 memory blocks, and tries to reuse them as the Guard pages of the memory to
862 be allocated.
863
864 @param[in] Start Start address of free memory block.
865 @param[in] Size Size of free memory block.
866 @param[in] SizeRequested Size of memory to allocate.
867
868 @return The end address of memory block found.
869 @return 0 if there is not enough space for the required size of memory and its Guard.
870 **/
871 UINT64
872 AdjustMemoryS (
873 IN UINT64 Start,
874 IN UINT64 Size,
875 IN UINT64 SizeRequested
876 )
877 {
878 UINT64 Target;
879
880 //
881 // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
882 // indicated to put the pool near the Tail Guard, we need extra bytes to
883 // make sure alignment of the returned pool address.
884 //
885 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
886 SizeRequested = ALIGN_VALUE(SizeRequested, 8);
887 }
888
889 Target = Start + Size - SizeRequested;
890
891 //
892 // At least one more page needed for Guard page.
893 //
894 if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {
895 return 0;
896 }
897
898 if (!IsGuardPage (Start + Size)) {
899 // No Guard at tail to share. One more page is needed.
900 Target -= EFI_PAGES_TO_SIZE (1);
901 }
902
903 // Out of range?
904 if (Target < Start) {
905 return 0;
906 }
907
908 // At the edge?
909 if (Target == Start) {
910 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
911 // Not enough space for a new head Guard if no Guard at head to share.
912 return 0;
913 }
914 }
915
916 // OK, we have enough pages for memory and its Guards. Return the End of the
917 // free space.
918 return Target + SizeRequested - 1;
919 }
920
921 /**
922 Adjust the start address and number of pages to free according to Guard.
923
924 The purpose of this function is to keep the shared Guard page with the
925 adjacent memory block if it is still in use, or to free it if it is no longer
926 shared. It also reserves pages as Guard pages in a partial page free situation.
927
928 @param[in,out] Memory Base address of memory to free.
929 @param[in,out] NumberOfPages Size of memory to free.
930
931 @return VOID.
932 **/
933 VOID
934 AdjustMemoryF (
935 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
936 IN OUT UINTN *NumberOfPages
937 )
938 {
939 EFI_PHYSICAL_ADDRESS Start;
940 EFI_PHYSICAL_ADDRESS MemoryToTest;
941 UINTN PagesToFree;
942 UINT64 GuardBitmap;
943
944 if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
945 return;
946 }
947
948 Start = *Memory;
949 PagesToFree = *NumberOfPages;
950
951 //
952 // Head Guard must be one page before, if any.
953 //
954 // MSB-> 1 0 <-LSB
955 // -------------------
956 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
957 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
958 // 1 X -> Don't free first page (need a new Guard)
959 // (it'll be turned into a Guard page later)
960 // -------------------
961 // Start -> -1 -2
962 //
963 MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
964 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
965 if ((GuardBitmap & BIT1) == 0) {
966 //
967 // Head Guard exists.
968 //
969 if ((GuardBitmap & BIT0) == 0) {
970 //
971 // If the head Guard is not a tail Guard of adjacent memory block,
972 // free it; otherwise, keep it.
973 //
974 Start -= EFI_PAGES_TO_SIZE (1);
975 PagesToFree += 1;
976 }
977 } else {
978 //
979 // No Head Guard, and pages before memory to free are still in Guard. It's a
980 // partial free case. We need to keep one page to be a tail Guard.
981 //
982 Start += EFI_PAGES_TO_SIZE (1);
983 PagesToFree -= 1;
984 }
985
986 //
987 // Tail Guard must be the page after this memory block to free, if any.
988 //
989 // MSB-> 1 0 <-LSB
990 // --------------------
991 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
992 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
993 // X 1 -> Don't free last page (need a new Guard)
994 // (it'll be turned into a Guard page later)
995 // --------------------
996 // +1 +0 <- End
997 //
998 MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
999 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
1000 if ((GuardBitmap & BIT0) == 0) {
1001 //
1002 // Tail Guard exists.
1003 //
1004 if ((GuardBitmap & BIT1) == 0) {
1005 //
1006 // If the tail Guard is not a head Guard of adjacent memory block,
1007 // free it; otherwise, keep it.
1008 //
1009 PagesToFree += 1;
1010 }
1011 } else if (PagesToFree > 0) {
1012 //
1013 // No Tail Guard, and pages after memory to free are still in Guard. It's a
1014 // partial free case. We need to keep one page to be a head Guard.
1015 //
1016 PagesToFree -= 1;
1017 }
1018
1019 *Memory = Start;
1020 *NumberOfPages = PagesToFree;
1021 }
1022
1023 /**
1024 Adjust the base and number of pages to really allocate according to Guard.
1025
1026 @param[in,out] Memory Base address of free memory.
1027 @param[in,out] NumberOfPages Size of memory to allocate.
1028
1029 @return VOID.
1030 **/
1031 VOID
1032 AdjustMemoryA (
1033 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
1034 IN OUT UINTN *NumberOfPages
1035 )
1036 {
1037 //
1038 // FindFreePages() has already taken the Guard into account. It's safe to
1039 // adjust the start address and/or number of pages here, to make sure that
1040 // the Guards are also "allocated".
1041 //
1042 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
1043 // No tail Guard, add one.
1044 *NumberOfPages += 1;
1045 }
1046
1047 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
1048 // No head Guard, add one.
1049 *Memory -= EFI_PAGE_SIZE;
1050 *NumberOfPages += 1;
1051 }
1052 }
1053
1054 /**
1055 Adjust the pool head position to make sure the Guard page is adjacent to
1056 the pool tail or pool head.
1057
1058 @param[in] Memory Base address of memory allocated.
1059 @param[in] NoPages Number of pages actually allocated.
1060 @param[in] Size Size of memory requested.
1061 (plus pool head/tail overhead)
1062
1063 @return Address of pool head
1064 **/
1065 VOID *
1066 AdjustPoolHeadA (
1067 IN EFI_PHYSICAL_ADDRESS Memory,
1068 IN UINTN NoPages,
1069 IN UINTN Size
1070 )
1071 {
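//
// BIT7 of PcdHeapGuardPropertyMask selects the guarding direction: when set,
// the pool head is placed right after the head Guard; when clear, the pool
// data ends right before the tail Guard, which is why Size is rounded up to
// keep the returned address 8-byte aligned.
//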
1072 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1073 //
1074 // Pool head is put near the head Guard
1075 //
1076 return (VOID *)(UINTN)Memory;
1077 }
1078
1079 //
1080 // Pool head is put near the tail Guard
1081 //
1082 Size = ALIGN_VALUE (Size, 8);
1083 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1084 }
1085
1086 /**
1087 Get the page base address according to pool head address.
1088
1089 @param[in] Memory Head address of pool to free.
1090
1091 @return Base address of the pages allocated for the pool.
1092 **/
1093 VOID *
1094 AdjustPoolHeadF (
1095 IN EFI_PHYSICAL_ADDRESS Memory
1096 )
1097 {
1098 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1099 //
1100 // Pool head is put near the head Guard
1101 //
1102 return (VOID *)(UINTN)Memory;
1103 }
1104
1105 //
1106 // Pool head is put near the tail Guard
1107 //
1108 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1109 }
1110
1111 /**
1112 Helper function of memory allocation with Guard pages.
1113
1114 @param FreePageList The free page node.
1115 @param NumberOfPages Number of pages to be allocated.
1116 @param MaxAddress Request to allocate memory below this address.
1117 @param MemoryType Type of memory requested.
1118
1119 @return Memory address of allocated pages.
1120 **/
1121 UINTN
1122 InternalAllocMaxAddressWithGuard (
1123 IN OUT LIST_ENTRY *FreePageList,
1124 IN UINTN NumberOfPages,
1125 IN UINTN MaxAddress,
1126 IN EFI_MEMORY_TYPE MemoryType
1127
1128 )
1129 {
1130 LIST_ENTRY *Node;
1131 FREE_PAGE_LIST *Pages;
1132 UINTN PagesToAlloc;
1133 UINTN HeadGuard;
1134 UINTN TailGuard;
1135 UINTN Address;
1136
1137 for (Node = FreePageList->BackLink; Node != FreePageList;
1138 Node = Node->BackLink) {
1139 Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
1140 if (Pages->NumberOfPages >= NumberOfPages &&
1141 (UINTN)Pages + EFI_PAGES_TO_SIZE (NumberOfPages) - 1 <= MaxAddress) {
1142
1143 //
1144 // We may need 1 or 2 more pages for Guard. Check it out.
1145 //
1146 PagesToAlloc = NumberOfPages;
1147 TailGuard = (UINTN)Pages + EFI_PAGES_TO_SIZE (Pages->NumberOfPages);
1148 if (!IsGuardPage (TailGuard)) {
1149 //
1150 // Add one if no Guard at the end of current free memory block.
1151 //
1152 PagesToAlloc += 1;
1153 TailGuard = 0;
1154 }
1155
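//
// Pages are carved from the top of this free block, so the head Guard
// candidate is the page just below the range to be allocated.
//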
1156 HeadGuard = (UINTN)Pages +
1157 EFI_PAGES_TO_SIZE (Pages->NumberOfPages - PagesToAlloc) -
1158 EFI_PAGE_SIZE;
1159 if (!IsGuardPage (HeadGuard)) {
1160 //
1161 // Add one if no Guard at the page before the address to allocate
1162 //
1163 PagesToAlloc += 1;
1164 HeadGuard = 0;
1165 }
1166
1167 if (Pages->NumberOfPages < PagesToAlloc) {
1168 // Not enough space to allocate memory with Guards? Try next block.
1169 continue;
1170 }
1171
1172 Address = InternalAllocPagesOnOneNode (Pages, PagesToAlloc, MaxAddress);
1173 ConvertSmmMemoryMapEntry(MemoryType, Address, PagesToAlloc, FALSE);
1174 CoreFreeMemoryMapStack();
1175 if (HeadGuard == 0) {
1176 // Don't pass the Guard page to user.
1177 Address += EFI_PAGE_SIZE;
1178 }
1179 SetGuardForMemory (Address, NumberOfPages);
1180 return Address;
1181 }
1182 }
1183
1184 return (UINTN)(-1);
1185 }
1186
1187 /**
1188 Helper function of memory free with Guard pages.
1189
1190 @param[in] Memory Base address of memory being freed.
1191 @param[in] NumberOfPages The number of pages to free.
1192 @param[in] AddRegion If this memory is new added region.
1193
1194 @retval EFI_NOT_FOUND Could not find the entry that covers the range.
1195 @retval EFI_INVALID_PARAMETER Address is not page aligned, Address is zero, or NumberOfPages is zero.
1196 @return EFI_SUCCESS Pages successfully freed.
1197 **/
1198 EFI_STATUS
1199 SmmInternalFreePagesExWithGuard (
1200 IN EFI_PHYSICAL_ADDRESS Memory,
1201 IN UINTN NumberOfPages,
1202 IN BOOLEAN AddRegion
1203 )
1204 {
1205 EFI_PHYSICAL_ADDRESS MemoryToFree;
1206 UINTN PagesToFree;
1207
1208 if (((Memory & EFI_PAGE_MASK) != 0) || (Memory == 0) || (NumberOfPages == 0)) {
1209 return EFI_INVALID_PARAMETER;
1210 }
1211
1212 MemoryToFree = Memory;
1213 PagesToFree = NumberOfPages;
1214
1215 AdjustMemoryF (&MemoryToFree, &PagesToFree);
1216 UnsetGuardForMemory (Memory, NumberOfPages);
1217 if (PagesToFree == 0) {
1218 return EFI_SUCCESS;
1219 }
1220
1221 return SmmInternalFreePagesEx (MemoryToFree, PagesToFree, AddRegion);
1222 }
1223
1224 /**
1225 Set all Guard pages which could not be set while in non-SMM mode.
1226 **/
1227 VOID
1228 SetAllGuardPages (
1229 VOID
1230 )
1231 {
1232 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1233 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1234 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1235 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1236 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1237 UINT64 TableEntry;
1238 UINT64 Address;
1239 UINT64 GuardPage;
1240 INTN Level;
1241 UINTN Index;
1242 BOOLEAN OnGuarding;
1243
1244 if (mGuardedMemoryMap == 0 ||
1245 mMapLevel == 0 ||
1246 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1247 return;
1248 }
1249
1250 CopyMem (Entries, mLevelMask, sizeof (Entries));
1251 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1252
1253 SetMem (Tables, sizeof(Tables), 0);
1254 SetMem (Addresses, sizeof(Addresses), 0);
1255 SetMem (Indices, sizeof(Indices), 0);
1256
1257 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1258 Tables[Level] = mGuardedMemoryMap;
1259 Address = 0;
1260 OnGuarding = FALSE;
1261
1262 DEBUG_CODE (
1263 DumpGuardedMemoryBitmap ();
1264 );
1265
1266 while (TRUE) {
1267 if (Indices[Level] > Entries[Level]) {
1268 Tables[Level] = 0;
1269 Level -= 1;
1270 } else {
1271
1272 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
1273 Address = Addresses[Level];
1274
1275 if (TableEntry == 0) {
1276
1277 OnGuarding = FALSE;
1278
1279 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1280
1281 Level += 1;
1282 Tables[Level] = TableEntry;
1283 Addresses[Level] = Address;
1284 Indices[Level] = 0;
1285
1286 continue;
1287
1288 } else {
1289
1290 Index = 0;
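//
// Leaf level: while walking the bitmap, a 0->1 transition marks a head Guard
// (the page just below the guarded range) and a 1->0 transition marks a tail
// Guard (the page just above it).
//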
1291 while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
1292 if ((TableEntry & 1) == 1) {
1293 if (OnGuarding) {
1294 GuardPage = 0;
1295 } else {
1296 GuardPage = Address - EFI_PAGE_SIZE;
1297 }
1298 OnGuarding = TRUE;
1299 } else {
1300 if (OnGuarding) {
1301 GuardPage = Address;
1302 } else {
1303 GuardPage = 0;
1304 }
1305 OnGuarding = FALSE;
1306 }
1307
1308 if (GuardPage != 0) {
1309 SetGuardPage (GuardPage);
1310 }
1311
1312 if (TableEntry == 0) {
1313 break;
1314 }
1315
1316 TableEntry = RShiftU64 (TableEntry, 1);
1317 Address += EFI_PAGE_SIZE;
1318 Index += 1;
1319 }
1320 }
1321 }
1322
1323 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1324 break;
1325 }
1326
1327 Indices[Level] += 1;
1328 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1329 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1330
1331 }
1332 }
1333
1334 /**
1335 Hook function used to set all Guard pages after entering SMM mode.
1336 **/
1337 VOID
1338 SmmEntryPointMemoryManagementHook (
1339 VOID
1340 )
1341 {
1342 EFI_STATUS Status;
1343
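//
// SetGuardPage()/UnsetGuardPage() are no-ops until the SMM memory attribute
// protocol is located; once it is found, apply guards to all pages recorded
// in the bitmap so far.
//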
1344 if (mSmmMemoryAttribute == NULL) {
1345 Status = SmmLocateProtocol (
1346 &gEdkiiSmmMemoryAttributeProtocolGuid,
1347 NULL,
1348 (VOID **)&mSmmMemoryAttribute
1349 );
1350 if (!EFI_ERROR(Status)) {
1351 SetAllGuardPages ();
1352 }
1353 }
1354 }
1355
1356 /**
1357 Helper function to convert a UINT64 value in binary to a string.
1358
1359 @param[in] Value Value of a UINT64 integer.
1360 @param[out] BinString String buffer to contain the conversion result.
1361
1362 @return VOID.
1363 **/
1364 VOID
1365 Uint64ToBinString (
1366 IN UINT64 Value,
1367 OUT CHAR8 *BinString
1368 )
1369 {
1370 UINTN Index;
1371
1372 if (BinString == NULL) {
1373 return;
1374 }
1375
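//
// The output buffer must hold at least 65 CHAR8s: 64 binary digits plus the
// terminating NUL written at BinString[64].
//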
1376 for (Index = 64; Index > 0; --Index) {
1377 BinString[Index - 1] = '0' + (Value & 1);
1378 Value = RShiftU64 (Value, 1);
1379 }
1380 BinString[64] = '\0';
1381 }
1382
1383 /**
1384 Dump the guarded memory bit map.
1385 **/
1386 VOID
1387 EFIAPI
1388 DumpGuardedMemoryBitmap (
1389 VOID
1390 )
1391 {
1392 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1393 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1394 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1395 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1396 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1397 UINT64 TableEntry;
1398 UINT64 Address;
1399 INTN Level;
1400 UINTN RepeatZero;
1401 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
1402 CHAR8 *Ruler1;
1403 CHAR8 *Ruler2;
1404
1405 if (mGuardedMemoryMap == 0 ||
1406 mMapLevel == 0 ||
1407 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1408 return;
1409 }
1410
1411 Ruler1 = " 3 2 1 0";
1412 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
1413
1414 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
1415 " Guarded Memory Bitmap "
1416 "==============================\r\n"));
1417 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
1418 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
1419
1420 CopyMem (Entries, mLevelMask, sizeof (Entries));
1421 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1422
1423 SetMem (Indices, sizeof(Indices), 0);
1424 SetMem (Tables, sizeof(Tables), 0);
1425 SetMem (Addresses, sizeof(Addresses), 0);
1426
1427 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1428 Tables[Level] = mGuardedMemoryMap;
1429 Address = 0;
1430 RepeatZero = 0;
1431
1432 while (TRUE) {
1433 if (Indices[Level] > Entries[Level]) {
1434
1435 Tables[Level] = 0;
1436 Level -= 1;
1437 RepeatZero = 0;
1438
1439 DEBUG ((
1440 HEAP_GUARD_DEBUG_LEVEL,
1441 "========================================="
1442 "=========================================\r\n"
1443 ));
1444
1445 } else {
1446
1447 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
1448 Address = Addresses[Level];
1449
1450 if (TableEntry == 0) {
1451
1452 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1453 if (RepeatZero == 0) {
1454 Uint64ToBinString(TableEntry, String);
1455 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1456 } else if (RepeatZero == 1) {
1457 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
1458 }
1459 RepeatZero += 1;
1460 }
1461
1462 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1463
1464 Level += 1;
1465 Tables[Level] = TableEntry;
1466 Addresses[Level] = Address;
1467 Indices[Level] = 0;
1468 RepeatZero = 0;
1469
1470 continue;
1471
1472 } else {
1473
1474 RepeatZero = 0;
1475 Uint64ToBinString(TableEntry, String);
1476 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1477
1478 }
1479 }
1480
1481 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1482 break;
1483 }
1484
1485 Indices[Level] += 1;
1486 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1487 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1488
1489 }
1490 }
1491
1492 /**
1493 Debug function used to verify if the Guard page is well set or not.
1494
1495 @param[in] BaseAddress Address of memory to check.
1496 @param[in] NumberOfPages Size of memory in pages.
1497
1498 @return TRUE The head Guard and tail Guard are both well set.
1499 @return FALSE The head Guard and/or tail Guard are not well set.
1500 **/
1501 BOOLEAN
1502 VerifyMemoryGuard (
1503 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1504 IN UINTN NumberOfPages
1505 )
1506 {
1507 EFI_STATUS Status;
1508 UINT64 Attribute;
1509 EFI_PHYSICAL_ADDRESS Address;
1510
1511 if (mSmmMemoryAttribute == NULL) {
1512 return TRUE;
1513 }
1514
1515 Attribute = 0;
1516 Address = BaseAddress - EFI_PAGE_SIZE;
1517 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1518 mSmmMemoryAttribute,
1519 Address,
1520 EFI_PAGE_SIZE,
1521 &Attribute
1522 );
1523 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
1524 DEBUG ((DEBUG_ERROR, "Head Guard is not set at: %016lx (%016lX)!!!\r\n",
1525 Address, Attribute));
1526 DumpGuardedMemoryBitmap ();
1527 return FALSE;
1528 }
1529
1530 Attribute = 0;
1531 Address = BaseAddress + EFI_PAGES_TO_SIZE (NumberOfPages);
1532 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1533 mSmmMemoryAttribute,
1534 Address,
1535 EFI_PAGE_SIZE,
1536 &Attribute
1537 );
1538 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
1539 DEBUG ((DEBUG_ERROR, "Tail Guard is not set at: %016lx (%016lX)!!!\r\n",
1540 Address, Attribute));
1541 DumpGuardedMemoryBitmap ();
1542 return FALSE;
1543 }
1544
1545 return TRUE;
1546 }
1547