MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "DxeMain.h"
16 #include "Imem.h"
17 #include "HeapGuard.h"
18
19 //
20 // Global to avoid infinite reentrance of memory allocation when updating
21 // page table attributes, which may need to allocate pages for new PDE/PTE.
22 //
23 GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
24
25 //
26 // Pointer to the table tracking guarded memory with a bitmap, in which '1'
27 // indicates guarded memory and '0' means either free memory or a Guard
28 // page itself, depending on the status of the memory adjacent to it.
29 //
30 GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
31
32 //
33 // Current depth level of map table pointed by mGuardedMemoryMap.
34 // mMapLevel must be initialized to at least 1. It is updated automatically
35 // according to the address of the memory just tracked.
36 //
37 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
38
39 //
40 // Shift and mask for each level of map table
41 //
42 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
44 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
45 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
46
47 /**
48 Set corresponding bits in bitmap table to 1 according to the address.
49
50 @param[in] Address Start address to set for.
51 @param[in] BitNumber Number of bits to set.
52 @param[in] BitMap Pointer to bitmap which covers the Address.
53
54 @return VOID.
55 **/
56 STATIC
57 VOID
58 SetBits (
59 IN EFI_PHYSICAL_ADDRESS Address,
60 IN UINTN BitNumber,
61 IN UINT64 *BitMap
62 )
63 {
64 UINTN Lsbs;
65 UINTN Qwords;
66 UINTN Msbs;
67 UINTN StartBit;
68 UINTN EndBit;
69
70 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
71 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
72
73 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
74 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
75 GUARDED_HEAP_MAP_ENTRY_BITS;
76 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
77 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
78 } else {
79 Msbs = BitNumber;
80 Lsbs = 0;
81 Qwords = 0;
82 }
83
84 if (Msbs > 0) {
85 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
86 BitMap += 1;
87 }
88
89 if (Qwords > 0) {
90 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
91 (UINT64)-1);
92 BitMap += Qwords;
93 }
94
95 if (Lsbs > 0) {
96 *BitMap |= (LShiftU64 (1, Lsbs) - 1);
97 }
98 }
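//
// For illustration: with StartBit == 60 and BitNumber == 10, the split above
// yields Msbs == 4 (bits 60..63 of the first qword), Qwords == 0 and
// Lsbs == 6 (bits 0..5 of the following qword), so the run of '1's is
// written across the qword boundary without disturbing any other bits.
//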
99
100 /**
101 Set corresponding bits in bitmap table to 0 according to the address.
102
103 @param[in] Address Start address to clear bits for.
104 @param[in] BitNumber Number of bits to clear.
105 @param[in] BitMap Pointer to bitmap which covers the Address.
106
107 @return VOID.
108 **/
109 STATIC
110 VOID
111 ClearBits (
112 IN EFI_PHYSICAL_ADDRESS Address,
113 IN UINTN BitNumber,
114 IN UINT64 *BitMap
115 )
116 {
117 UINTN Lsbs;
118 UINTN Qwords;
119 UINTN Msbs;
120 UINTN StartBit;
121 UINTN EndBit;
122
123 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
124 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
125
126 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
127 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
128 GUARDED_HEAP_MAP_ENTRY_BITS;
129 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
130 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
131 } else {
132 Msbs = BitNumber;
133 Lsbs = 0;
134 Qwords = 0;
135 }
136
137 if (Msbs > 0) {
138 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
139 BitMap += 1;
140 }
141
142 if (Qwords > 0) {
143 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
144 BitMap += Qwords;
145 }
146
147 if (Lsbs > 0) {
148 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
149 }
150 }
151
152 /**
153 Get corresponding bits in bitmap table according to the address.
154
155 The value of bit 0 corresponds to the status of memory at given Address.
156 No more than 64 bits can be retrieved in one call.
157
158 @param[in] Address Start address to retrieve bits for.
159 @param[in] BitNumber Number of bits to get.
160 @param[in] BitMap Pointer to bitmap which covers the Address.
161
162 @return An integer containing the bits information.
163 **/
164 STATIC
165 UINT64
166 GetBits (
167 IN EFI_PHYSICAL_ADDRESS Address,
168 IN UINTN BitNumber,
169 IN UINT64 *BitMap
170 )
171 {
172 UINTN StartBit;
173 UINTN EndBit;
174 UINTN Lsbs;
175 UINTN Msbs;
176 UINT64 Result;
177
178 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
179
180 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
181 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
182
183 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
184 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
185 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
186 } else {
187 Msbs = BitNumber;
188 Lsbs = 0;
189 }
190
191 Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
192 if (Lsbs > 0) {
193 BitMap += 1;
194 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
195 }
196
197 return Result;
198 }
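//
// For illustration: bit 0 of the returned value reflects the page at the
// given Address and bit N reflects the page at Address + N * EFI_PAGE_SIZE.
// With StartBit == 62 and BitNumber == 4, bits 62..63 of the first qword
// become bits 0..1 of the result and bits 0..1 of the next qword become
// bits 2..3.
//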
199
200 /**
201 Locate the pointer to the bitmap, within the guarded memory bitmap tables,
202 that covers the given Address.
203
204 @param[in] Address Start address to search the bitmap for.
205 @param[in] AllocMapUnit Flag to indicate whether a missing map table should be allocated.
206 @param[out] BitMap Pointer to bitmap which covers the Address.
207
208 @return The number of bits from the given Address to the end of the current map unit.
209 **/
210 UINTN
211 FindGuardedMemoryMap (
212 IN EFI_PHYSICAL_ADDRESS Address,
213 IN BOOLEAN AllocMapUnit,
214 OUT UINT64 **BitMap
215 )
216 {
217 UINTN Level;
218 UINT64 *GuardMap;
219 UINT64 MapMemory;
220 UINTN Index;
221 UINTN Size;
222 UINTN BitsToUnitEnd;
223 EFI_STATUS Status;
224
225 //
226 // Adjust current map table depth according to the address to access
227 //
228 while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH
229 &&
230 RShiftU64 (
231 Address,
232 mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
233 ) != 0) {
234
235 if (mGuardedMemoryMap != 0) {
236 Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
237 * GUARDED_HEAP_MAP_ENTRY_BYTES;
238 Status = CoreInternalAllocatePages (
239 AllocateAnyPages,
240 EfiBootServicesData,
241 EFI_SIZE_TO_PAGES (Size),
242 &MapMemory,
243 FALSE
244 );
245 ASSERT_EFI_ERROR (Status);
246 ASSERT (MapMemory != 0);
247
248 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
249
250 *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
251 mGuardedMemoryMap = MapMemory;
252 }
253
254 mMapLevel++;
255
256 }
257
258 GuardMap = &mGuardedMemoryMap;
259 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
260 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
261 ++Level) {
262
263 if (*GuardMap == 0) {
264 if (!AllocMapUnit) {
265 GuardMap = NULL;
266 break;
267 }
268
269 Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
270 Status = CoreInternalAllocatePages (
271 AllocateAnyPages,
272 EfiBootServicesData,
273 EFI_SIZE_TO_PAGES (Size),
274 &MapMemory,
275 FALSE
276 );
277 ASSERT_EFI_ERROR (Status);
278 ASSERT (MapMemory != 0);
279
280 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
281 *GuardMap = MapMemory;
282 }
283
284 Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
285 Index &= mLevelMask[Level];
286 GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
287
288 }
289
290 BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
291 *BitMap = GuardMap;
292
293 return BitsToUnitEnd;
294 }
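//
// For illustration, the bitmap is organized as a multi-level table (a trie)
// rooted at mGuardedMemoryMap, sketched below. The concrete shift/mask
// values come from GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS/_MASKS in the
// accompanying header and are not repeated here.
//
//   mGuardedMemoryMap
//     +-- entry[(Address >> mLevelShift[L]) & mLevelMask[L]]  -> next table
//           +-- ... one lookup per level ...
//                 +-- deepest entry: a 64-bit bitmap, one bit per page
//
// Missing intermediate tables are allocated on demand only when AllocMapUnit
// is TRUE; otherwise *BitMap is returned as NULL.
//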
295
296 /**
297 Set corresponding bits in bitmap table to 1 according to given memory range.
298
299 @param[in] Address Memory address to guard from.
300 @param[in] NumberOfPages Number of pages to guard.
301
302 @return VOID.
303 **/
304 VOID
305 EFIAPI
306 SetGuardedMemoryBits (
307 IN EFI_PHYSICAL_ADDRESS Address,
308 IN UINTN NumberOfPages
309 )
310 {
311 UINT64 *BitMap;
312 UINTN Bits;
313 UINTN BitsToUnitEnd;
314
315 while (NumberOfPages > 0) {
316 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
317 ASSERT (BitMap != NULL);
318
319 if (NumberOfPages > BitsToUnitEnd) {
320 // Cross map unit
321 Bits = BitsToUnitEnd;
322 } else {
323 Bits = NumberOfPages;
324 }
325
326 SetBits (Address, Bits, BitMap);
327
328 NumberOfPages -= Bits;
329 Address += EFI_PAGES_TO_SIZE (Bits);
330 }
331 }
332
333 /**
334 Clear corresponding bits in bitmap table according to given memory range.
335
336 @param[in] Address Memory address to unset from.
337 @param[in] NumberOfPages Number of pages to unset guard.
338
339 @return VOID.
340 **/
341 VOID
342 EFIAPI
343 ClearGuardedMemoryBits (
344 IN EFI_PHYSICAL_ADDRESS Address,
345 IN UINTN NumberOfPages
346 )
347 {
348 UINT64 *BitMap;
349 UINTN Bits;
350 UINTN BitsToUnitEnd;
351
352 while (NumberOfPages > 0) {
353 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
354 ASSERT (BitMap != NULL);
355
356 if (NumberOfPages > BitsToUnitEnd) {
357 // Cross map unit
358 Bits = BitsToUnitEnd;
359 } else {
360 Bits = NumberOfPages;
361 }
362
363 ClearBits (Address, Bits, BitMap);
364
365 NumberOfPages -= Bits;
366 Address += EFI_PAGES_TO_SIZE (Bits);
367 }
368 }
369
370 /**
371 Retrieve corresponding bits in bitmap table according to given memory range.
372
373 @param[in] Address Memory address to retrieve from.
374 @param[in] NumberOfPages Number of pages to retrieve.
375
376 @return An integer containing the guarded memory bitmap.
377 **/
378 UINTN
379 GetGuardedMemoryBits (
380 IN EFI_PHYSICAL_ADDRESS Address,
381 IN UINTN NumberOfPages
382 )
383 {
384 UINT64 *BitMap;
385 UINTN Bits;
386 UINTN Result;
387 UINTN Shift;
388 UINTN BitsToUnitEnd;
389
390 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
391
392 Result = 0;
393 Shift = 0;
394 while (NumberOfPages > 0) {
395 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
396
397 if (NumberOfPages > BitsToUnitEnd) {
398 // Cross map unit
399 Bits = BitsToUnitEnd;
400 } else {
401 Bits = NumberOfPages;
402 }
403
404 if (BitMap != NULL) {
405 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
406 }
407
408 Shift += Bits;
409 NumberOfPages -= Bits;
410 Address += EFI_PAGES_TO_SIZE (Bits);
411 }
412
413 return Result;
414 }
415
416 /**
417 Get bit value in bitmap table for the given address.
418
419 @param[in] Address The address to retrieve for.
420
421 @return 1 or 0.
422 **/
423 UINTN
424 EFIAPI
425 GetGuardMapBit (
426 IN EFI_PHYSICAL_ADDRESS Address
427 )
428 {
429 UINT64 *GuardMap;
430
431 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
432 if (GuardMap != NULL) {
433 if (RShiftU64 (*GuardMap,
434 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
435 return 1;
436 }
437 }
438
439 return 0;
440 }
441
442 /**
443 Set the bit in bitmap table for the given address.
444
445 @param[in] Address The address to set for.
446
447 @return VOID.
448 **/
449 VOID
450 EFIAPI
451 SetGuardMapBit (
452 IN EFI_PHYSICAL_ADDRESS Address
453 )
454 {
455 UINT64 *GuardMap;
456 UINT64 BitMask;
457
458 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
459 if (GuardMap != NULL) {
460 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
461 *GuardMap |= BitMask;
462 }
463 }
464
465 /**
466 Clear the bit in bitmap table for the given address.
467
468 @param[in] Address The address to clear for.
469
470 @return VOID.
471 **/
472 VOID
473 EFIAPI
474 ClearGuardMapBit (
475 IN EFI_PHYSICAL_ADDRESS Address
476 )
477 {
478 UINT64 *GuardMap;
479 UINT64 BitMask;
480
481 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
482 if (GuardMap != NULL) {
483 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
484 *GuardMap &= ~BitMask;
485 }
486 }
487
488 /**
489 Check to see if the page at the given address is a Guard page or not.
490
491 @param[in] Address The address to check for.
492
493 @return TRUE The page at Address is a Guard page.
494 @return FALSE The page at Address is not a Guard page.
495 **/
496 BOOLEAN
497 EFIAPI
498 IsGuardPage (
499 IN EFI_PHYSICAL_ADDRESS Address
500 )
501 {
502 UINTN BitMap;
503
504 //
505 // There must be at least one guarded page before and/or after the given
506 // address if it is a Guard page. The bitmap pattern should be one of
507 // 001, 100 or 101.
508 //
509 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
510 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
511 }
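//
// For illustration, the three bits read above are, from LSB to MSB, the
// pages at Address - 4K, Address and Address + 4K:
//   001 -> only the page before is guarded:  Address is a tail Guard.
//   100 -> only the page after is guarded:   Address is a head Guard.
//   101 -> both neighbors are guarded:       Address is a Guard shared by
//                                            two adjacent allocations.
// A Guard page itself is never marked in the bitmap, so bit 1 must be 0.
//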
512
513 /**
514 Check to see if the page at the given address is a head Guard page or not.
515
516 @param[in] Address The address to check for
517
518 @return TRUE The page at Address is a head Guard page
519 @return FALSE The page at Address is not a head Guard page
520 **/
521 BOOLEAN
522 EFIAPI
523 IsHeadGuard (
524 IN EFI_PHYSICAL_ADDRESS Address
525 )
526 {
527 return (GetGuardedMemoryBits (Address, 2) == BIT1);
528 }
529
530 /**
531 Check to see if the page at the given address is a tail Guard page or not.
532
533 @param[in] Address The address to check for.
534
535 @return TRUE The page at Address is a tail Guard page.
536 @return FALSE The page at Address is not a tail Guard page.
537 **/
538 BOOLEAN
539 EFIAPI
540 IsTailGuard (
541 IN EFI_PHYSICAL_ADDRESS Address
542 )
543 {
544 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
545 }
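//
// For illustration: IsHeadGuard() checks the two pages starting at Address
// (pattern 10: Address unguarded, next page guarded), while IsTailGuard()
// checks the two pages ending at Address (pattern 01: previous page guarded,
// Address unguarded). A Guard shared by two adjacent allocations satisfies
// both checks.
//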
546
547 /**
548 Check to see if the page at the given address is guarded or not.
549
550 @param[in] Address The address to check for.
551
552 @return TRUE The page at Address is guarded.
553 @return FALSE The page at Address is not guarded.
554 **/
555 BOOLEAN
556 EFIAPI
557 IsMemoryGuarded (
558 IN EFI_PHYSICAL_ADDRESS Address
559 )
560 {
561 return (GetGuardMapBit (Address) == 1);
562 }
563
564 /**
565 Set the page at the given address to be a Guard page.
566
567 This is done by changing the page table attribute to be NOT PRESENT.
568
569 @param[in] BaseAddress Page address to Guard at
570
571 @return VOID
572 **/
573 VOID
574 EFIAPI
575 SetGuardPage (
576 IN EFI_PHYSICAL_ADDRESS BaseAddress
577 )
578 {
579 //
580 // Set the flag to make sure that memory allocated for page table operations
581 // is not guarded; otherwise an infinite loop could occur.
582 //
583 mOnGuarding = TRUE;
584 //
585 // Note: This might overwrite other attributes needed by other features,
586 // such as memory protection (NX). Please make sure they are not enabled
587 // at the same time.
588 //
589 gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
590 mOnGuarding = FALSE;
591 }
592
593 /**
594 Unset the Guard page at the given address to the normal memory.
595
596 This is done by changing the page table attribute to be PRESENT.
597
598 @param[in] BaseAddress Page address to unset Guard at.
599
600 @return VOID.
601 **/
602 VOID
603 EFIAPI
604 UnsetGuardPage (
605 IN EFI_PHYSICAL_ADDRESS BaseAddress
606 )
607 {
608 //
609 // Set the flag to make sure that memory allocated for page table operations
610 // is not guarded; otherwise an infinite loop could occur.
611 //
612 mOnGuarding = TRUE;
613 //
614 // Note: This might overwrite other attributes needed by other features,
615 // such as memory protection (NX). Please make sure they are not enabled
616 // at the same time.
617 //
618 gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, 0);
619 mOnGuarding = FALSE;
620 }
621
622 /**
623 Check to see if the memory at the given address should be guarded or not.
624
625 @param[in] MemoryType Memory type to check.
626 @param[in] AllocateType Allocation type to check.
627 @param[in] PageOrPool Indicate a page allocation or pool allocation.
628
629
630 @return TRUE The given type of memory should be guarded.
631 @return FALSE The given type of memory should not be guarded.
632 **/
633 BOOLEAN
634 IsMemoryTypeToGuard (
635 IN EFI_MEMORY_TYPE MemoryType,
636 IN EFI_ALLOCATE_TYPE AllocateType,
637 IN UINT8 PageOrPool
638 )
639 {
640 UINT64 TestBit;
641 UINT64 ConfigBit;
642 BOOLEAN InSmm;
643
644 if (gCpu == NULL || AllocateType == AllocateAddress) {
645 return FALSE;
646 }
647
648 InSmm = FALSE;
649 if (gSmmBase2 != NULL) {
650 gSmmBase2->InSmm (gSmmBase2, &InSmm);
651 }
652
653 if (InSmm) {
654 return FALSE;
655 }
656
657 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
658 return FALSE;
659 }
660
661 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
662 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
663 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
664 ConfigBit = PcdGet64 (PcdHeapGuardPageType);
665 } else {
666 ConfigBit = (UINT64)-1;
667 }
668
669 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
670 TestBit = BIT63;
671 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
672 TestBit = BIT62;
673 } else if (MemoryType < EfiMaxMemoryType) {
674 TestBit = LShiftU64 (1, MemoryType);
675 } else if (MemoryType == EfiMaxMemoryType) {
676 TestBit = (UINT64)-1;
677 } else {
678 TestBit = 0;
679 }
680
681 return ((ConfigBit & TestBit) != 0);
682 }
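//
// For illustration (bit assignments follow the PCD definitions, not this
// file): with BIT0 of PcdHeapGuardPropertyMask set to enable page guard and
// BIT4 set in PcdHeapGuardPageType, a page allocation of type
// EfiBootServicesData (numeric value 4, hence TestBit == BIT4) would be
// guarded, while other memory types would not.
//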
683
684 /**
685 Check to see if the pool at the given address should be guarded or not.
686
687 @param[in] MemoryType Pool type to check.
688
689
690 @return TRUE The given type of pool should be guarded.
691 @return FALSE The given type of pool should not be guarded.
692 **/
693 BOOLEAN
694 IsPoolTypeToGuard (
695 IN EFI_MEMORY_TYPE MemoryType
696 )
697 {
698 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
699 GUARD_HEAP_TYPE_POOL);
700 }
701
702 /**
703 Check to see if the page at the given address should be guarded or not.
704
705 @param[in] MemoryType Page type to check.
706 @param[in] AllocateType Allocation type to check.
707
708 @return TRUE The given type of page should be guarded.
709 @return FALSE The given type of page should not be guarded.
710 **/
711 BOOLEAN
712 IsPageTypeToGuard (
713 IN EFI_MEMORY_TYPE MemoryType,
714 IN EFI_ALLOCATE_TYPE AllocateType
715 )
716 {
717 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
718 }
719
720 /**
721 Set head Guard and tail Guard for the given memory range.
722
723 @param[in] Memory Base address of memory to set guard for.
724 @param[in] NumberOfPages Memory size in pages.
725
726 @return VOID
727 **/
728 VOID
729 SetGuardForMemory (
730 IN EFI_PHYSICAL_ADDRESS Memory,
731 IN UINTN NumberOfPages
732 )
733 {
734 EFI_PHYSICAL_ADDRESS GuardPage;
735
736 //
737 // Set tail Guard
738 //
739 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
740 if (!IsGuardPage (GuardPage)) {
741 SetGuardPage (GuardPage);
742 }
743
744 // Set head Guard
745 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
746 if (!IsGuardPage (GuardPage)) {
747 SetGuardPage (GuardPage);
748 }
749
750 //
751 // Mark the memory range as Guarded
752 //
753 SetGuardedMemoryBits (Memory, NumberOfPages);
754 }
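//
// Resulting layout, for illustration (N == NumberOfPages; Guard pages are
// mapped not-present):
//
//      Memory - 4K    Memory                          Memory + N * 4K
//      +------------+------------------------------------+------------+
//      | head Guard |  N usable pages, bitmap bits == 1  | tail Guard |
//      +------------+------------------------------------+------------+
//
// An existing Guard page of a neighboring allocation is reused rather than
// being set again.
//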
755
756 /**
757 Unset head Guard and tail Guard for the given memory range.
758
759 @param[in] Memory Base address of memory to unset guard for.
760 @param[in] NumberOfPages Memory size in pages.
761
762 @return VOID
763 **/
764 VOID
765 UnsetGuardForMemory (
766 IN EFI_PHYSICAL_ADDRESS Memory,
767 IN UINTN NumberOfPages
768 )
769 {
770 EFI_PHYSICAL_ADDRESS GuardPage;
771 UINT64 GuardBitmap;
772
773 if (NumberOfPages == 0) {
774 return;
775 }
776
777 //
778 // Head Guard must be one page before, if any.
779 //
780 // MSB-> 1 0 <-LSB
781 // -------------------
782 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
783 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
784 // 1 X -> Don't free first page (need a new Guard)
785 // (it'll be turned into a Guard page later)
786 // -------------------
787 // Start -> -1 -2
788 //
789 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
790 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
791 if ((GuardBitmap & BIT1) == 0) {
792 //
793 // Head Guard exists.
794 //
795 if ((GuardBitmap & BIT0) == 0) {
796 //
797 // If the head Guard is not a tail Guard of adjacent memory block,
798 // unset it.
799 //
800 UnsetGuardPage (GuardPage);
801 }
802 } else {
803 //
804 // Pages before memory to free are still in Guard. It's a partial free
805 // case. Turn first page of memory block to free into a new Guard.
806 //
807 SetGuardPage (Memory);
808 }
809
810 //
811 // Tail Guard must be the page after this memory block to free, if any.
812 //
813 // MSB-> 1 0 <-LSB
814 // --------------------
815 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
816 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
817 // X 1 -> Don't free last page (need a new Guard)
818 // (it'll be turned into a Guard page later)
819 // --------------------
820 // +1 +0 <- End
821 //
822 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
823 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
824 if ((GuardBitmap & BIT0) == 0) {
825 //
826 // Tail Guard exists.
827 //
828 if ((GuardBitmap & BIT1) == 0) {
829 //
830 // If the tail Guard is not a head Guard of adjacent memory block,
831 // free it; otherwise, keep it.
832 //
833 UnsetGuardPage (GuardPage);
834 }
835 } else {
836 //
837 // Pages after memory to free are still in Guard. It's a partial free
838 // case. We need to keep one page to be a head Guard.
839 //
840 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
841 }
842
843 //
844 // No matter what, we just clear the mark of the Guarded memory.
845 //
846 ClearGuardedMemoryBits(Memory, NumberOfPages);
847 }
848
849 /**
850 Adjust address of free memory according to existing and/or required Guard.
851
852 This function checks whether there are existing Guard pages in adjacent
853 memory blocks, and tries to use them as the Guard pages of the memory to
854 be allocated.
855
856 @param[in] Start Start address of free memory block.
857 @param[in] Size Size of free memory block.
858 @param[in] SizeRequested Size of memory to allocate.
859
860 @return The end address of memory block found.
861 @return 0 if there is not enough space for the required size of memory and its Guard.
862 **/
863 UINT64
864 AdjustMemoryS (
865 IN UINT64 Start,
866 IN UINT64 Size,
867 IN UINT64 SizeRequested
868 )
869 {
870 UINT64 Target;
871
872 Target = Start + Size - SizeRequested;
873
874 //
875 // At least one more page needed for Guard page.
876 //
877 if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {
878 return 0;
879 }
880
881 if (!IsGuardPage (Start + Size)) {
882 // No Guard at tail to share. One more page is needed.
883 Target -= EFI_PAGES_TO_SIZE (1);
884 }
885
886 // Out of range?
887 if (Target < Start) {
888 return 0;
889 }
890
891 // At the edge?
892 if (Target == Start) {
893 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
894 // Not enough space for a new head Guard if no Guard at head to share.
895 return 0;
896 }
897 }
898
899 // OK, we have enough pages for memory and its Guards. Return the End of the
900 // free space.
901 return Target + SizeRequested - 1;
902 }
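//
// Worked example with assumed numbers: Start == 0x10000, Size == 0x5000
// (5 pages) and SizeRequested == 0x3000 (3 pages), with no existing Guard at
// Start + Size. Target starts at 0x12000, drops to 0x11000 to leave room for
// a new tail Guard, and the function returns 0x13FFF, i.e. the requested
// 3 pages end at 0x13FFF with 0x14000 left for the tail Guard and 0x10000
// left for the head Guard.
//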
903
904 /**
905 Adjust the start address and number of pages to free according to Guard.
906
907 The purpose of this function is to keep the shared Guard page with the
908 adjacent memory block if it is still guarded, or free it if it is no
909 longer shared. It also reserves pages as Guard pages in a partial-free case.
910
911 @param[in,out] Memory Base address of memory to free.
912 @param[in,out] NumberOfPages Size of memory to free.
913
914 @return VOID.
915 **/
916 VOID
917 AdjustMemoryF (
918 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
919 IN OUT UINTN *NumberOfPages
920 )
921 {
922 EFI_PHYSICAL_ADDRESS Start;
923 EFI_PHYSICAL_ADDRESS MemoryToTest;
924 UINTN PagesToFree;
925 UINT64 GuardBitmap;
926
927 if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
928 return;
929 }
930
931 Start = *Memory;
932 PagesToFree = *NumberOfPages;
933
934 //
935 // Head Guard must be one page before, if any.
936 //
937 // MSB-> 1 0 <-LSB
938 // -------------------
939 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
940 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
941 // 1 X -> Don't free first page (need a new Guard)
942 // (it'll be turned into a Guard page later)
943 // -------------------
944 // Start -> -1 -2
945 //
946 MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
947 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
948 if ((GuardBitmap & BIT1) == 0) {
949 //
950 // Head Guard exists.
951 //
952 if ((GuardBitmap & BIT0) == 0) {
953 //
954 // If the head Guard is not a tail Guard of adjacent memory block,
955 // free it; otherwise, keep it.
956 //
957 Start -= EFI_PAGES_TO_SIZE (1);
958 PagesToFree += 1;
959 }
960 } else {
961 //
962 // No Head Guard, and pages before memory to free are still in Guard. It's a
963 // partial free case. We need to keep one page to be a tail Guard.
964 //
965 Start += EFI_PAGES_TO_SIZE (1);
966 PagesToFree -= 1;
967 }
968
969 //
970 // Tail Guard must be the page after this memory block to free, if any.
971 //
972 // MSB-> 1 0 <-LSB
973 // --------------------
974 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
975 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
976 // X 1 -> Don't free last page (need a new Guard)
977 // (it'll be turned into a Guard page later)
978 // --------------------
979 // +1 +0 <- End
980 //
981 MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
982 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
983 if ((GuardBitmap & BIT0) == 0) {
984 //
985 // Tail Guard exists.
986 //
987 if ((GuardBitmap & BIT1) == 0) {
988 //
989 // If the tail Guard is not a head Guard of adjacent memory block,
990 // free it; otherwise, keep it.
991 //
992 PagesToFree += 1;
993 }
994 } else if (PagesToFree > 0) {
995 //
996 // No Tail Guard, and pages after memory to free are still in Guard. It's a
997 // partial free case. We need to keep one page to be a head Guard.
998 //
999 PagesToFree -= 1;
1000 }
1001
1002 *Memory = Start;
1003 *NumberOfPages = PagesToFree;
1004 }
1005
1006 /**
1007 Adjust the base and number of pages to really allocate according to Guard.
1008
1009 @param[in,out] Memory Base address of free memory.
1010 @param[in,out] NumberOfPages Size of memory to allocate.
1011
1012 @return VOID.
1013 **/
1014 VOID
1015 AdjustMemoryA (
1016 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
1017 IN OUT UINTN *NumberOfPages
1018 )
1019 {
1020 //
1021 // FindFreePages() has already taken the Guard into account. It's safe to
1022 // adjust the start address and/or number of pages here, to make sure that
1023 // the Guards are also "allocated".
1024 //
1025 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
1026 // No tail Guard, add one.
1027 *NumberOfPages += 1;
1028 }
1029
1030 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
1031 // No head Guard, add one.
1032 *Memory -= EFI_PAGE_SIZE;
1033 *NumberOfPages += 1;
1034 }
1035 }
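//
// Continuing the example above: if the allocation is placed at 0x11000 with
// 3 pages and neither neighboring page is already a Guard, the range is
// widened to 0x10000 with 5 pages, so that both future Guard pages are taken
// off the free list together with the usable memory.
//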
1036
1037 /**
1038 Adjust the pool head position to make sure the Guard page is adjacent to
1039 pool tail or pool head.
1040
1041 @param[in] Memory Base address of memory allocated.
1042 @param[in] NoPages Number of pages actually allocated.
1043 @param[in] Size Size of memory requested.
1044 (plus pool head/tail overhead)
1045
1046 @return Address of pool head.
1047 **/
1048 VOID *
1049 AdjustPoolHeadA (
1050 IN EFI_PHYSICAL_ADDRESS Memory,
1051 IN UINTN NoPages,
1052 IN UINTN Size
1053 )
1054 {
1055 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1056 //
1057 // Pool head is put near the head Guard
1058 //
1059 return (VOID *)(UINTN)Memory;
1060 }
1061
1062 //
1063 // Pool head is put near the tail Guard
1064 //
1065 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1066 }
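//
// For illustration with assumed numbers: NoPages == 2 and Size == 0x520.
// With BIT7 of PcdHeapGuardPropertyMask clear, the pool head is placed at
// Memory + 0x2000 - 0x520 == Memory + 0x1AE0, so the end of the pool buffer
// sits right against the tail Guard and an overflow faults immediately; with
// BIT7 set, the pool head stays at Memory, so an underflow hits the head
// Guard instead.
//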
1067
1068 /**
1069 Get the page base address according to pool head address.
1070
1071 @param[in] Memory Head address of pool to free.
1072
1073 @return Page base address of the pool to free.
1074 **/
1075 VOID *
1076 AdjustPoolHeadF (
1077 IN EFI_PHYSICAL_ADDRESS Memory
1078 )
1079 {
1080 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1081 //
1082 // Pool head is put near the head Guard
1083 //
1084 return (VOID *)(UINTN)Memory;
1085 }
1086
1087 //
1088 // Pool head is put near the tail Guard
1089 //
1090 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1091 }
1092
1093 /**
1094 Allocate or free guarded memory.
1095
1096 @param[in] Start Start address of memory to allocate or free.
1097 @param[in] NumberOfPages Memory size in pages.
1098 @param[in] NewType Memory type to convert to.
1099
1100 @return EFI_SUCCESS or the status returned by CoreConvertPages().
1101 **/
1102 EFI_STATUS
1103 CoreConvertPagesWithGuard (
1104 IN UINT64 Start,
1105 IN UINTN NumberOfPages,
1106 IN EFI_MEMORY_TYPE NewType
1107 )
1108 {
1109 if (NewType == EfiConventionalMemory) {
1110 AdjustMemoryF (&Start, &NumberOfPages);
1111 if (NumberOfPages == 0) {
1112 return EFI_SUCCESS;
1113 }
1114 } else {
1115 AdjustMemoryA (&Start, &NumberOfPages);
1116 }
1117
1118 return CoreConvertPages (Start, NumberOfPages, NewType);
1119 }
1120
1121 /**
1122 Helper function to convert a UINT64 value to a binary string.
1123
1124 @param[in] Value Value of a UINT64 integer.
1125 @param[out] BinString String buffer to contain the conversion result.
1126
1127 @return VOID.
1128 **/
1129 VOID
1130 Uint64ToBinString (
1131 IN UINT64 Value,
1132 OUT CHAR8 *BinString
1133 )
1134 {
1135 UINTN Index;
1136
1137 if (BinString == NULL) {
1138 return;
1139 }
1140
1141 for (Index = 64; Index > 0; --Index) {
1142 BinString[Index - 1] = '0' + (Value & 1);
1143 Value = RShiftU64 (Value, 1);
1144 }
1145 BinString[64] = '\0';
1146 }
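//
// Note: BinString must be able to hold at least 65 CHAR8s (64 digits plus
// the terminating '\0'). For illustration, a Value of 0x5 produces a string
// of 61 '0' characters followed by "101", i.e. the most significant bit is
// written first.
//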
1147
1148 /**
1149 Dump the guarded memory bit map.
1150 **/
1151 VOID
1152 EFIAPI
1153 DumpGuardedMemoryBitmap (
1154 VOID
1155 )
1156 {
1157 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1158 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1159 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1160 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1161 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1162 UINT64 TableEntry;
1163 UINT64 Address;
1164 INTN Level;
1165 UINTN RepeatZero;
1166 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
1167 CHAR8 *Ruler1;
1168 CHAR8 *Ruler2;
1169
1170 if (mGuardedMemoryMap == 0 ||
1171 mMapLevel == 0 ||
1172 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1173 return;
1174 }
1175
1176 Ruler1 = " 3 2 1 0";
1177 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
1178
1179 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
1180 " Guarded Memory Bitmap "
1181 "==============================\r\n"));
1182 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
1183 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
1184
1185 CopyMem (Entries, mLevelMask, sizeof (Entries));
1186 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1187
1188 SetMem (Indices, sizeof(Indices), 0);
1189 SetMem (Tables, sizeof(Tables), 0);
1190 SetMem (Addresses, sizeof(Addresses), 0);
1191
1192 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1193 Tables[Level] = mGuardedMemoryMap;
1194 Address = 0;
1195 RepeatZero = 0;
1196
1197 while (TRUE) {
1198 if (Indices[Level] > Entries[Level]) {
1199
1200 Tables[Level] = 0;
1201 Level -= 1;
1202 RepeatZero = 0;
1203
1204 DEBUG ((
1205 HEAP_GUARD_DEBUG_LEVEL,
1206 "========================================="
1207 "=========================================\r\n"
1208 ));
1209
1210 } else {
1211
1212 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
1213 Address = Addresses[Level];
1214
1215 if (TableEntry == 0) {
1216
1217 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1218 if (RepeatZero == 0) {
1219 Uint64ToBinString(TableEntry, String);
1220 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1221 } else if (RepeatZero == 1) {
1222 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
1223 }
1224 RepeatZero += 1;
1225 }
1226
1227 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1228
1229 Level += 1;
1230 Tables[Level] = TableEntry;
1231 Addresses[Level] = Address;
1232 Indices[Level] = 0;
1233 RepeatZero = 0;
1234
1235 continue;
1236
1237 } else {
1238
1239 RepeatZero = 0;
1240 Uint64ToBinString(TableEntry, String);
1241 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1242
1243 }
1244 }
1245
1246 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1247 break;
1248 }
1249
1250 Indices[Level] += 1;
1251 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1252 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1253
1254 }
1255 }
1256