/** @file
  UEFI Heap Guard functions.

Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "HeapGuard.h"

//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocating pages for new PDE/PTE.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to table tracking the Guarded memory with bitmap, in which '1'
// is used to indicate memory guarded. '0' might be free memory or Guard
// page itself, depending on status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized to at least 1. It will be automatically
// updated according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                      = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                      = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
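
//
// The actual shift/mask values come from the GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS
// and GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS macros (see HeapGuard.h) and are not
// repeated here. Conceptually, each level of the map table indexes an address
// the same way FindGuardedMemoryMap() below does:
//
//   Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]) & mLevelMask[Level];
//
// so the lookup walks level by level, much like a page-table walk, until it
// reaches a last-level UINT64 bitmap word whose bits track individual pages.
//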

//
// SMM memory attribute protocol
//
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *mSmmMemoryAttribute = NULL;

/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address     Start address to set for.
  @param[in]  BitNumber   Number of bits to set.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
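
//
// Worked example (illustrative only, assuming 64-bit map entries, i.e.
// GUARDED_HEAP_MAP_ENTRY_BITS == 64): for StartBit == 60 and BitNumber == 70,
// SetBits() splits the run into Msbs == 4 bits in the first qword, Qwords == 1
// full qword written via SetMem64(), and Lsbs == 2 bits in the last qword
// (4 + 64 + 2 == 70). ClearBits() below partitions a run the same way.
//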

/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address     Start address to clear for.
  @param[in]  BitNumber   Number of bits to clear.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}

/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address     Start address to retrieve bits for.
  @param[in]  BitNumber   Number of bits to get.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    Result = *BitMap;
  } else {
    Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
    if (Lsbs > 0) {
      BitMap  += 1;
      Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
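
//
// Worked example (illustrative only, again assuming 64-bit map entries): for
// StartBit == 62 and BitNumber == 4, Msbs == 2 and Lsbs == 2, so GetBits()
// combines the top two bits of the first qword with the bottom two bits of
// the following qword; bit 0 of the result always describes the page at
// Address.
//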

/**
  Helper function to allocate pages without Guard for internal uses.

  @param[in]  Pages       Page number.

  @return Address of memory allocated.
**/
VOID *
PageAlloc (
  IN UINTN  Pages
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;

  Status = SmmInternalAllocatePages (AllocateAnyPages, EfiRuntimeServicesData,
                                     Pages, &Memory, FALSE);
  if (EFI_ERROR (Status)) {
    Memory = 0;
  }

  return (VOID *)(UINTN)Memory;
}

/**
  Locate the pointer to the bitmap, within the guarded memory bitmap tables,
  that covers the given Address.

  @param[in]   Address        Start address to search the bitmap for.
  @param[in]   AllocMapUnit   Flag to indicate memory allocation for the table.
  @param[out]  BitMap         Pointer to bitmap which covers the Address.

  @return The number of bits from the given Address to the end of the current
          map unit.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;

  //
  // Adjust current map table depth according to the address to access
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index     &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}

/**
  Set corresponding bits in bitmap table to 1 according to given memory range.

  @param[in]  Address       Memory address to guard from.
  @param[in]  NumberOfPages Number of pages to guard.

  @return VOID.
**/
VOID
EFIAPI
SetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   NumberOfPages
  )
{
  UINT64            *BitMap;
  UINTN             Bits;
  UINTN             BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    SetBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Clear corresponding bits in bitmap table according to given memory range.

  @param[in]  Address       Memory address to unset from.
  @param[in]  NumberOfPages Number of pages to unset guard.

  @return VOID.
**/
VOID
EFIAPI
ClearGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   NumberOfPages
  )
{
  UINT64            *BitMap;
  UINTN             Bits;
  UINTN             BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    ClearBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  @param[in]  Address       Memory address to retrieve from.
  @param[in]  NumberOfPages Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINTN
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   NumberOfPages
  )
{
  UINT64            *BitMap;
  UINTN             Bits;
  UINTN             Result;
  UINTN             Shift;
  UINTN             BitsToUnitEnd;

  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift  = 0;
  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    if (BitMap != NULL) {
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift         += Bits;
    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}

/**
  Get bit value in bitmap table for the given address.

  @param[in]  Address     The address to retrieve for.

  @return 1 or 0.
**/
UINTN
EFIAPI
GetGuardMapBit (
  IN EFI_PHYSICAL_ADDRESS    Address
  )
{
  UINT64        *GuardMap;

  FindGuardedMemoryMap (Address, FALSE, &GuardMap);
  if (GuardMap != NULL) {
    if (RShiftU64 (*GuardMap,
                   GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
      return 1;
    }
  }

  return 0;
}

/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]  Address     The address to check for.

  @return TRUE  The page at Address is a Guard page.
  @return FALSE The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS    Address
  )
{
  UINTN       BitMap;

  //
  // There must be at least one guarded page before and/or after given
  // address if it's a Guard page. The bitmap pattern should be one of
  // 001, 100 and 101
  //
  BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
  return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
}
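
//
// Worked example (illustrative only): for a guarded two-page allocation at
// P and P + EFI_PAGE_SIZE, only those two pages are marked in the bitmap.
// For the head Guard at P - EFI_PAGE_SIZE, only the page above it (P) is
// marked, so the 3-bit pattern is BIT2 (100); for the tail Guard at
// P + 2 * EFI_PAGE_SIZE, only the page below it is marked, giving BIT0 (001);
// a Guard page shared by two adjacent guarded blocks yields BIT2 | BIT0 (101).
//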

/**
  Check to see if the page at the given address is guarded or not.

  @param[in]  Address     The address to check for.

  @return TRUE  The page at Address is guarded.
  @return FALSE The page at Address is not guarded.
**/
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS    Address
  )
{
  return (GetGuardMapBit (Address) == 1);
}

/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT.

  @param[in]  BaseAddress     Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
SetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  EFI_STATUS      Status;

  if (mSmmMemoryAttribute != NULL) {
    mOnGuarding = TRUE;
    Status = mSmmMemoryAttribute->SetMemoryAttributes (
                                    mSmmMemoryAttribute,
                                    BaseAddress,
                                    EFI_PAGE_SIZE,
                                    EFI_MEMORY_RP
                                    );
    ASSERT_EFI_ERROR (Status);
    mOnGuarding = FALSE;
  }
}

/**
  Unset the Guard page at the given address back to normal memory.

  This is done by changing the page table attribute to be PRESENT.

  @param[in]  BaseAddress     Page address to unset Guard at.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  EFI_STATUS      Status;

  if (mSmmMemoryAttribute != NULL) {
    mOnGuarding = TRUE;
    Status = mSmmMemoryAttribute->ClearMemoryAttributes (
                                    mSmmMemoryAttribute,
                                    BaseAddress,
                                    EFI_PAGE_SIZE,
                                    EFI_MEMORY_RP
                                    );
    ASSERT_EFI_ERROR (Status);
    mOnGuarding = FALSE;
  }
}

/**
  Check to see if the memory at the given address should be guarded or not.

  @param[in]  MemoryType      Memory type to check.
  @param[in]  AllocateType    Allocation type to check.
  @param[in]  PageOrPool      Indicate a page allocation or pool allocation.

  @return TRUE  The given type of memory should be guarded.
  @return FALSE The given type of memory should not be guarded.
**/
BOOLEAN
IsMemoryTypeToGuard (
  IN EFI_MEMORY_TYPE        MemoryType,
  IN EFI_ALLOCATE_TYPE      AllocateType,
  IN UINT8                  PageOrPool
  )
{
  UINT64          TestBit;
  UINT64          ConfigBit;

  if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0
      || mOnGuarding
      || AllocateType == AllocateAddress) {
    return FALSE;
  }

  ConfigBit = 0;
  if ((PageOrPool & GUARD_HEAP_TYPE_POOL) != 0) {
    ConfigBit |= PcdGet64 (PcdHeapGuardPoolType);
  }

  if ((PageOrPool & GUARD_HEAP_TYPE_PAGE) != 0) {
    ConfigBit |= PcdGet64 (PcdHeapGuardPageType);
  }

  if (MemoryType == EfiRuntimeServicesData ||
      MemoryType == EfiRuntimeServicesCode) {
    TestBit = LShiftU64 (1, MemoryType);
  } else if (MemoryType == EfiMaxMemoryType) {
    TestBit = (UINT64)-1;
  } else {
    TestBit = 0;
  }

  return ((ConfigBit & TestBit) != 0);
}
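
//
// Configuration sketch (illustrative): with the standard EFI_MEMORY_TYPE
// numbering, EfiRuntimeServicesData is 6, so the bit tested above for that
// type is LShiftU64 (1, 6) == BIT6. Guarding SMM page allocations of that
// type therefore requires the platform to set the GUARD_HEAP_TYPE_PAGE bit
// in PcdHeapGuardPropertyMask and BIT6 in PcdHeapGuardPageType; pool
// guarding is controlled by PcdHeapGuardPoolType in the same way.
//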

/**
  Check to see if the pool at the given address should be guarded or not.

  @param[in]  MemoryType      Pool type to check.

  @return TRUE  The given type of pool should be guarded.
  @return FALSE The given type of pool should not be guarded.
**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE        MemoryType
  )
{
  return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
                              GUARD_HEAP_TYPE_POOL);
}

/**
  Check to see if the page at the given address should be guarded or not.

  @param[in]  MemoryType      Page type to check.
  @param[in]  AllocateType    Allocation type to check.

  @return TRUE  The given type of page should be guarded.
  @return FALSE The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE        MemoryType,
  IN EFI_ALLOCATE_TYPE      AllocateType
  )
{
  return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
}

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @return TRUE/FALSE.
**/
BOOLEAN
IsHeapGuardEnabled (
  VOID
  )
{
  return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
                              GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
}

/**
  Set head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to set guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS    GuardPage;

  //
  // Set tail Guard
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  // Set head Guard
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  //
  // Mark the memory range as Guarded
  //
  SetGuardedMemoryBits (Memory, NumberOfPages);
}
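
//
// Resulting layout (illustrative): after SetGuardForMemory (Memory, N) the
// region looks like
//
//   [Head Guard, not-present] [Memory .. Memory + N pages) [Tail Guard, not-present]
//
// where only the N user pages are marked '1' in the guarded-memory bitmap;
// the Guard pages themselves stay '0' and are recognized by IsGuardPage()
// from the pattern of their neighbors.
//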

/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard   (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard as well (not shared Guard)
  //                1     X -> Don't free first page   (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage   = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard   (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard as well (not shared Guard)
  //         X     1               -> Don't free last page    (need a new Guard)
  //                                  (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage   = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits (Memory, NumberOfPages);
}

/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  purpose is to reserve pages as Guard pages in a partial page free situation.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;
  UINT64                Attributes;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start       = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // In case the memory to free is marked as read-only (e.g. EfiRuntimeServicesCode).
  //
  if (mSmmMemoryAttribute != NULL) {
    Attributes = 0;
    mSmmMemoryAttribute->GetMemoryAttributes (
                           mSmmMemoryAttribute,
                           Start,
                           EFI_PAGES_TO_SIZE (PagesToFree),
                           &Attributes
                           );
    if ((Attributes & EFI_MEMORY_RO) != 0) {
      mSmmMemoryAttribute->ClearMemoryAttributes (
                             mSmmMemoryAttribute,
                             Start,
                             EFI_PAGES_TO_SIZE (PagesToFree),
                             EFI_MEMORY_RO
                             );
    }
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard   (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard as well (not shared Guard)
  //                1     X -> Don't free first page   (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard   (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard as well (not shared Guard)
  //         X     1               -> Don't free last page    (need a new Guard)
  //                                  (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
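
//
// Worked example (illustrative only): freeing a whole guarded block of N
// pages whose head and tail Guards are not shared with neighbors moves
// *Memory back by one page and grows *NumberOfPages to N + 2, so both Guard
// pages are returned to the free pool together with the user pages. In a
// partial free in the middle of a guarded block, one page at each still
// guarded boundary is held back to become a new Guard page.
//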

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  pool tail or pool head.

  @param[in]  Memory    Base address of memory allocated.
  @param[in]  NoPages   Number of pages actually allocated.
  @param[in]  Size      Size of memory requested.
                        (plus pool head/tail overhead)

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS    Memory,
  IN UINTN                   NoPages,
  IN UINTN                   Size
  )
{
  if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  Size = ALIGN_VALUE (Size, 8);
  return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
}
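
//
// Worked example (illustrative only): with BIT7 of PcdHeapGuardPropertyMask
// clear, Size == 0x48 and NoPages == 1, the pool head is returned at
// Memory + 0x1000 - 0x48, so the allocation ends exactly at the page boundary
// next to the tail Guard and an access past the end of the allocation faults
// on the not-present Guard page. With BIT7 set, the pool head stays at Memory
// instead, placing the allocation against the head Guard so underflows are
// caught rather than overflows.
//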

/**
  Get the page base address according to pool head address.

  @param[in]  Memory    Head address of pool to free.

  @return Address of the page base.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS    Memory
  )
{
  if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
}

/**
  Helper function of memory allocation with Guard pages.

  @param  FreePageList           The free page node.
  @param  NumberOfPages          Number of pages to be allocated.
  @param  MaxAddress             Request to allocate memory below this address.
  @param  MemoryType             Type of memory requested.

  @return Memory address of allocated pages.
**/
UINTN
InternalAllocMaxAddressWithGuard (
  IN OUT LIST_ENTRY           *FreePageList,
  IN     UINTN                NumberOfPages,
  IN     UINTN                MaxAddress,
  IN     EFI_MEMORY_TYPE      MemoryType
  )
{
  LIST_ENTRY      *Node;
  FREE_PAGE_LIST  *Pages;
  UINTN           PagesToAlloc;
  UINTN           HeadGuard;
  UINTN           TailGuard;
  UINTN           Address;

  for (Node = FreePageList->BackLink; Node != FreePageList;
       Node = Node->BackLink) {
    Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
    if (Pages->NumberOfPages >= NumberOfPages &&
        (UINTN)Pages + EFI_PAGES_TO_SIZE (NumberOfPages) - 1 <= MaxAddress) {

      //
      // We may need 1 or 2 more pages for Guard. Check it out.
      //
      PagesToAlloc = NumberOfPages;
      TailGuard    = (UINTN)Pages + EFI_PAGES_TO_SIZE (Pages->NumberOfPages);
      if (!IsGuardPage (TailGuard)) {
        //
        // Add one if no Guard at the end of current free memory block.
        //
        PagesToAlloc += 1;
        TailGuard     = 0;
      }

      HeadGuard = (UINTN)Pages +
                  EFI_PAGES_TO_SIZE (Pages->NumberOfPages - PagesToAlloc) -
                  EFI_PAGE_SIZE;
      if (!IsGuardPage (HeadGuard)) {
        //
        // Add one if no Guard at the page before the address to allocate
        //
        PagesToAlloc += 1;
        HeadGuard     = 0;
      }

      if (Pages->NumberOfPages < PagesToAlloc) {
        // Not enough space to allocate memory with Guards? Try next block.
        continue;
      }

      Address = InternalAllocPagesOnOneNode (Pages, PagesToAlloc, MaxAddress);
      ConvertSmmMemoryMapEntry (MemoryType, Address, PagesToAlloc, FALSE);
      CoreFreeMemoryMapStack ();
      if (HeadGuard == 0) {
        // Don't pass the Guard page to user.
        Address += EFI_PAGE_SIZE;
      }
      SetGuardForMemory (Address, NumberOfPages);
      return Address;
    }
  }

  return (UINTN)(-1);
}
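
//
// Allocation sketch (illustrative only): pages are taken from the tail of the
// free block (hence the HeadGuard/TailGuard math above), so the PagesToAlloc
// pages end up laid out as
//
//   [new Head Guard?] [NumberOfPages user pages] [new Tail Guard?]
//
// where each Guard page is only counted in PagesToAlloc when an existing
// adjacent Guard cannot be reused, and the returned Address is advanced past
// a newly allocated head Guard so the caller never sees the Guard page itself.
//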

/**
  Helper function of memory free with Guard pages.

  @param[in]  Memory                 Base address of memory being freed.
  @param[in]  NumberOfPages          The number of pages to free.
  @param[in]  AddRegion              If this memory is a newly added region.

  @retval EFI_NOT_FOUND          Could not find the entry that covers the range.
  @retval EFI_INVALID_PARAMETER  Address not aligned, Address is zero or
                                 NumberOfPages is zero.
  @retval EFI_SUCCESS            Pages successfully freed.
**/
EFI_STATUS
SmmInternalFreePagesExWithGuard (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages,
  IN BOOLEAN               AddRegion
  )
{
  EFI_PHYSICAL_ADDRESS  MemoryToFree;
  UINTN                 PagesToFree;

  if (((Memory & EFI_PAGE_MASK) != 0) || (Memory == 0) || (NumberOfPages == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  MemoryToFree = Memory;
  PagesToFree  = NumberOfPages;

  AdjustMemoryF (&MemoryToFree, &PagesToFree);
  UnsetGuardForMemory (Memory, NumberOfPages);
  if (PagesToFree == 0) {
    return EFI_SUCCESS;
  }

  return SmmInternalFreePagesEx (MemoryToFree, PagesToFree, AddRegion);
}

/**
  Set all Guard pages which could not be set during non-SMM mode.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     Index;
  BOOLEAN   OnGuarding;

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
    } else {

      TableEntry  = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {

        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;

      } else {

        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address   += EFI_PAGE_SIZE;
          Index     += 1;
        }
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);

  }
}

/**
  Hook function used to set all Guard pages after entering SMM mode.
**/
VOID
SmmEntryPointMemoryManagementHook (
  VOID
  )
{
  EFI_STATUS    Status;

  if (mSmmMemoryAttribute == NULL) {
    Status = SmmLocateProtocol (
               &gEdkiiSmmMemoryAttributeProtocolGuid,
               NULL,
               (VOID **)&mSmmMemoryAttribute
               );
    if (!EFI_ERROR (Status)) {
      SetAllGuardPages ();
    }
  }
}

/**
  Helper function to convert a UINT64 value in binary to a string.

  @param[in]  Value       Value of a UINT64 integer.
  @param[out] BinString   String buffer to contain the conversion result.

  @return VOID.
**/
VOID
Uint64ToBinString (
  IN  UINT64      Value,
  OUT CHAR8       *BinString
  )
{
  UINTN Index;

  if (BinString == NULL) {
    return;
  }

  for (Index = 64; Index > 0; --Index) {
    BinString[Index - 1] = '0' + (Value & 1);
    Value = RShiftU64 (Value, 1);
  }
  BinString[64] = '\0';
}

/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof (Indices), 0);
  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);

  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {

      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry  = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {

        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          if (RepeatZero == 0) {
            Uint64ToBinString (TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;
        RepeatZero        = 0;

        continue;

      } else {

        RepeatZero = 0;
        Uint64ToBinString (TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);

  }
}

/**
  Debug function used to verify if the Guard page is well set or not.

  @param[in]  BaseAddress     Address of memory to check.
  @param[in]  NumberOfPages   Size of memory in pages.

  @return TRUE  The head Guard and tail Guard are both well set.
  @return FALSE The head Guard and/or tail Guard are not well set.
**/
BOOLEAN
VerifyMemoryGuard (
  IN  EFI_PHYSICAL_ADDRESS   BaseAddress,
  IN  UINTN                  NumberOfPages
  )
{
  EFI_STATUS            Status;
  UINT64                Attribute;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mSmmMemoryAttribute == NULL) {
    return TRUE;
  }

  Attribute = 0;
  Address   = BaseAddress - EFI_PAGE_SIZE;
  Status    = mSmmMemoryAttribute->GetMemoryAttributes (
                                     mSmmMemoryAttribute,
                                     Address,
                                     EFI_PAGE_SIZE,
                                     &Attribute
                                     );
  if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
    DEBUG ((DEBUG_ERROR, "Head Guard is not set at: %016lx (%016lX)!!!\r\n",
            Address, Attribute));
    DumpGuardedMemoryBitmap ();
    return FALSE;
  }

  Attribute = 0;
  Address   = BaseAddress + EFI_PAGES_TO_SIZE (NumberOfPages);
  Status    = mSmmMemoryAttribute->GetMemoryAttributes (
                                     mSmmMemoryAttribute,
                                     Address,
                                     EFI_PAGE_SIZE,
                                     &Attribute
                                     );
  if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
    DEBUG ((DEBUG_ERROR, "Tail Guard is not set at: %016lx (%016lX)!!!\r\n",
            Address, Attribute));
    DumpGuardedMemoryBitmap ();
    return FALSE;
  }

  return TRUE;
}