MdeModulePkg/Core/Dxe/Mem/HeapGuard.c (commit ac043b5d9b264b2c3e27cff8727d1824d52aa304)
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "DxeMain.h"
16 #include "Imem.h"
17 #include "HeapGuard.h"
18
19 //
20 // Global to avoid infinite reentrance of memory allocation when updating
21 // page table attributes, which may need to allocate pages for new PDE/PTE.
22 //
23 GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
24
25 //
26 // Pointer to the table tracking guarded memory with a bitmap, in which '1'
27 // indicates guarded memory and '0' is either free memory or a Guard page
28 // itself, depending on the status of the memory adjacent to it.
29 //
30 GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
31
32 //
33 // Current depth level of the map table pointed to by mGuardedMemoryMap.
34 // mMapLevel must be initialized to at least 1. It is updated automatically
35 // according to the address of the memory just tracked.
36 //
37 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
38
39 //
40 // Shift and mask for each level of map table
41 //
42 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
44 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
45 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
46
47 /**
48 Set corresponding bits in bitmap table to 1 according to the address.
49
50 @param[in] Address Start address to set for.
51 @param[in] BitNumber Number of bits to set.
52 @param[in] BitMap Pointer to bitmap which covers the Address.
53
54 @return VOID.
55 **/
56 STATIC
57 VOID
58 SetBits (
59 IN EFI_PHYSICAL_ADDRESS Address,
60 IN UINTN BitNumber,
61 IN UINT64 *BitMap
62 )
63 {
64 UINTN Lsbs;
65 UINTN Qwords;
66 UINTN Msbs;
67 UINTN StartBit;
68 UINTN EndBit;
69
70 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
71 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
72
73 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
74 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
75 GUARDED_HEAP_MAP_ENTRY_BITS;
76 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
77 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
78 } else {
79 Msbs = BitNumber;
80 Lsbs = 0;
81 Qwords = 0;
82 }
83
84 if (Msbs > 0) {
85 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
86 BitMap += 1;
87 }
88
89 if (Qwords > 0) {
90 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
91 (UINT64)-1);
92 BitMap += Qwords;
93 }
94
95 if (Lsbs > 0) {
96 *BitMap |= (LShiftU64 (1, Lsbs) - 1);
97 }
98 }
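
//
// Illustrative sketch (not part of this module's build): SetBits() above and
// ClearBits() below split a run of BitNumber bits starting at StartBit into a
// leading partial word (Msbs), whole 64-bit words (Qwords) and a trailing
// partial word (Lsbs). The hosted ISO C model below uses hypothetical names
// and assumes 64-bit map entries, matching GUARDED_HEAP_MAP_ENTRY_BITS; it is
// fenced with "#if 0" so it is never compiled as part of this file.
//
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_BITS  64

// Set BitNumber bits to 1 in BitMap, starting at bit StartBit of BitMap[0].
static void
SetBitRun (uint64_t *BitMap, unsigned StartBit, unsigned BitNumber)
{
  unsigned Msbs, Qwords, Lsbs;

  if (StartBit + BitNumber > ENTRY_BITS) {
    Msbs   = (ENTRY_BITS - StartBit) % ENTRY_BITS;  // bits left in first word
    Qwords = (BitNumber - Msbs) / ENTRY_BITS;       // whole words to fill
    Lsbs   = (BitNumber - Msbs) % ENTRY_BITS;       // bits in the last word
  } else {
    Msbs   = BitNumber;
    Qwords = 0;
    Lsbs   = 0;
  }

  if (Msbs > 0) {
    uint64_t Mask = (Msbs == ENTRY_BITS) ? ~0ULL : ((1ULL << Msbs) - 1);
    *BitMap |= Mask << StartBit;                    // StartBit is 0 if Msbs == 64
    BitMap++;
  }
  if (Qwords > 0) {
    memset (BitMap, 0xFF, Qwords * sizeof (uint64_t));
    BitMap += Qwords;
  }
  if (Lsbs > 0) {
    *BitMap |= (1ULL << Lsbs) - 1;
  }
}

int
main (void)
{
  uint64_t Map[3] = { 0, 0, 0 };

  // 70 bits starting at bit 60: 4 bits land in Map[0], 64 in Map[1], 2 in Map[2].
  SetBitRun (Map, 60, 70);
  assert (Map[0] == 0xF000000000000000ULL);
  assert (Map[1] == 0xFFFFFFFFFFFFFFFFULL);
  assert (Map[2] == 0x0000000000000003ULL);
  printf ("%016llx %016llx %016llx\n", (unsigned long long)Map[0],
          (unsigned long long)Map[1], (unsigned long long)Map[2]);
  return 0;
}
#endif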
99
100 /**
101 Set corresponding bits in bitmap table to 0 according to the address.
102
103 @param[in] Address Start address to set for.
104 @param[in] BitNumber Number of bits to clear.
105 @param[in] BitMap Pointer to bitmap which covers the Address.
106
107 @return VOID.
108 **/
109 STATIC
110 VOID
111 ClearBits (
112 IN EFI_PHYSICAL_ADDRESS Address,
113 IN UINTN BitNumber,
114 IN UINT64 *BitMap
115 )
116 {
117 UINTN Lsbs;
118 UINTN Qwords;
119 UINTN Msbs;
120 UINTN StartBit;
121 UINTN EndBit;
122
123 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
124 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
125
126 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
127 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
128 GUARDED_HEAP_MAP_ENTRY_BITS;
129 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
130 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
131 } else {
132 Msbs = BitNumber;
133 Lsbs = 0;
134 Qwords = 0;
135 }
136
137 if (Msbs > 0) {
138 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
139 BitMap += 1;
140 }
141
142 if (Qwords > 0) {
143 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
144 BitMap += Qwords;
145 }
146
147 if (Lsbs > 0) {
148 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
149 }
150 }
151
152 /**
153 Get corresponding bits in bitmap table according to the address.
154
155 The value of bit 0 corresponds to the status of memory at given Address.
156 No more than 64 bits can be retrieved in one call.
157
158 @param[in] Address Start address to retrieve bits for.
159 @param[in] BitNumber Number of bits to get.
160 @param[in] BitMap Pointer to bitmap which covers the Address.
161
162 @return An integer containing the bits information.
163 **/
164 STATIC
165 UINT64
166 GetBits (
167 IN EFI_PHYSICAL_ADDRESS Address,
168 IN UINTN BitNumber,
169 IN UINT64 *BitMap
170 )
171 {
172 UINTN StartBit;
173 UINTN EndBit;
174 UINTN Lsbs;
175 UINTN Msbs;
176 UINT64 Result;
177
178 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
179
180 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
181 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
182
183 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
184 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
185 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
186 } else {
187 Msbs = BitNumber;
188 Lsbs = 0;
189 }
190
191 Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
192 if (Lsbs > 0) {
193 BitMap += 1;
194 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
195 }
196
197 return Result;
198 }
199
200 /**
201 Locate the pointer to the bitmap, within the guarded memory bitmap tables,
202 that covers the given Address.
203
204 @param[in] Address Start address to search the bitmap for.
205 @param[in] AllocMapUnit Flag to indicate memory allocation for the table.
206 @param[out] BitMap Pointer to bitmap which covers the Address.
207
208 @return The number of bits from the given Address to the end of the current map unit.
209 **/
210 UINTN
211 FindGuardedMemoryMap (
212 IN EFI_PHYSICAL_ADDRESS Address,
213 IN BOOLEAN AllocMapUnit,
214 OUT UINT64 **BitMap
215 )
216 {
217 UINTN Level;
218 UINT64 *GuardMap;
219 UINT64 MapMemory;
220 UINTN Index;
221 UINTN Size;
222 UINTN BitsToUnitEnd;
223 EFI_STATUS Status;
224
225 //
226 // Adjust current map table depth according to the address to access
227 //
228 while (AllocMapUnit &&
229 mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
230 RShiftU64 (
231 Address,
232 mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
233 ) != 0) {
234
235 if (mGuardedMemoryMap != 0) {
236 Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
237 * GUARDED_HEAP_MAP_ENTRY_BYTES;
238 Status = CoreInternalAllocatePages (
239 AllocateAnyPages,
240 EfiBootServicesData,
241 EFI_SIZE_TO_PAGES (Size),
242 &MapMemory,
243 FALSE
244 );
245 ASSERT_EFI_ERROR (Status);
246 ASSERT (MapMemory != 0);
247
248 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
249
250 *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
251 mGuardedMemoryMap = MapMemory;
252 }
253
254 mMapLevel++;
255
256 }
257
258 GuardMap = &mGuardedMemoryMap;
259 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
260 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
261 ++Level) {
262
263 if (*GuardMap == 0) {
264 if (!AllocMapUnit) {
265 GuardMap = NULL;
266 break;
267 }
268
269 Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
270 Status = CoreInternalAllocatePages (
271 AllocateAnyPages,
272 EfiBootServicesData,
273 EFI_SIZE_TO_PAGES (Size),
274 &MapMemory,
275 FALSE
276 );
277 ASSERT_EFI_ERROR (Status);
278 ASSERT (MapMemory != 0);
279
280 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
281 *GuardMap = MapMemory;
282 }
283
284 Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
285 Index &= mLevelMask[Level];
286 GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
287
288 }
289
290 BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
291 *BitMap = GuardMap;
292
293 return BitsToUnitEnd;
294 }
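
//
// Illustrative sketch (not part of this module's build): the map walked by
// FindGuardedMemoryMap() is a small radix tree. At every level an index is
// extracted from the address with a per-level shift and mask, and missing
// intermediate tables are allocated on demand; the leaf entry is a 64-bit
// bitmap word with one bit per page. The hosted ISO C model below uses a
// hypothetical two-level layout and 4 KB pages purely for illustration.
//
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT     12                  // 4 KB pages
#define BITS_PER_WORD  64

// Hypothetical layout: level 0 indexes 512 leaf-table pointers, level 1
// indexes 512 bitmap words inside a leaf table.
static const unsigned LevelShift[2] = { PAGE_SHIFT + 6 + 9, PAGE_SHIFT + 6 };
static const uint64_t LevelMask[2]  = { 0x1FF, 0x1FF };

// Walk the tables, allocating missing ones, and return a pointer to the
// bitmap word covering Address, as FindGuardedMemoryMap() does.
static uint64_t *
LookupBitmapWord (uint64_t *Root, uint64_t Address)
{
  uint64_t *Table = Root;

  for (unsigned Level = 0; Level < 2; Level++) {
    uint64_t Index = (Address >> LevelShift[Level]) & LevelMask[Level];

    if (Level == 1) {
      return &Table[Index];                // leaf: one 64-bit bitmap word
    }
    if (Table[Index] == 0) {               // allocate the missing leaf table
      Table[Index] = (uint64_t)(uintptr_t)calloc ((size_t)LevelMask[Level + 1] + 1,
                                                  sizeof (uint64_t));
    }
    Table = (uint64_t *)(uintptr_t)Table[Index];
  }
  return NULL;                             // not reached with two levels
}

int
main (void)
{
  uint64_t *Root = calloc ((size_t)LevelMask[0] + 1, sizeof (uint64_t));
  uint64_t  Addr = 0x40000000ULL;          // example address
  uint64_t *Word = LookupBitmapWord (Root, Addr);

  *Word |= 1ULL << ((Addr >> PAGE_SHIFT) % BITS_PER_WORD);  // mark one page
  printf ("bitmap word for %#" PRIx64 ": %#" PRIx64 "\n", Addr, *Word);
  return 0;                                // allocations left to the OS here
}
#endif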
295
296 /**
297 Set corresponding bits in bitmap table to 1 according to given memory range.
298
299 @param[in] Address Memory address to guard from.
300 @param[in] NumberOfPages Number of pages to guard.
301
302 @return VOID.
303 **/
304 VOID
305 EFIAPI
306 SetGuardedMemoryBits (
307 IN EFI_PHYSICAL_ADDRESS Address,
308 IN UINTN NumberOfPages
309 )
310 {
311 UINT64 *BitMap;
312 UINTN Bits;
313 UINTN BitsToUnitEnd;
314
315 while (NumberOfPages > 0) {
316 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
317 ASSERT (BitMap != NULL);
318
319 if (NumberOfPages > BitsToUnitEnd) {
320 // Cross map unit
321 Bits = BitsToUnitEnd;
322 } else {
323 Bits = NumberOfPages;
324 }
325
326 SetBits (Address, Bits, BitMap);
327
328 NumberOfPages -= Bits;
329 Address += EFI_PAGES_TO_SIZE (Bits);
330 }
331 }
332
333 /**
334 Clear corresponding bits in bitmap table according to given memory range.
335
336 @param[in] Address Memory address to unset from.
337 @param[in] NumberOfPages Number of pages to unset guard.
338
339 @return VOID.
340 **/
341 VOID
342 EFIAPI
343 ClearGuardedMemoryBits (
344 IN EFI_PHYSICAL_ADDRESS Address,
345 IN UINTN NumberOfPages
346 )
347 {
348 UINT64 *BitMap;
349 UINTN Bits;
350 UINTN BitsToUnitEnd;
351
352 while (NumberOfPages > 0) {
353 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
354 ASSERT (BitMap != NULL);
355
356 if (NumberOfPages > BitsToUnitEnd) {
357 // Cross map unit
358 Bits = BitsToUnitEnd;
359 } else {
360 Bits = NumberOfPages;
361 }
362
363 ClearBits (Address, Bits, BitMap);
364
365 NumberOfPages -= Bits;
366 Address += EFI_PAGES_TO_SIZE (Bits);
367 }
368 }
369
370 /**
371 Retrieve corresponding bits in bitmap table according to given memory range.
372
373 @param[in] Address Memory address to retrieve from.
374 @param[in] NumberOfPages Number of pages to retrieve.
375
376 @return An integer containing the guarded memory bitmap.
377 **/
378 UINTN
379 GetGuardedMemoryBits (
380 IN EFI_PHYSICAL_ADDRESS Address,
381 IN UINTN NumberOfPages
382 )
383 {
384 UINT64 *BitMap;
385 UINTN Bits;
386 UINTN Result;
387 UINTN Shift;
388 UINTN BitsToUnitEnd;
389
390 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
391
392 Result = 0;
393 Shift = 0;
394 while (NumberOfPages > 0) {
395 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
396
397 if (NumberOfPages > BitsToUnitEnd) {
398 // Cross map unit
399 Bits = BitsToUnitEnd;
400 } else {
401 Bits = NumberOfPages;
402 }
403
404 if (BitMap != NULL) {
405 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
406 }
407
408 Shift += Bits;
409 NumberOfPages -= Bits;
410 Address += EFI_PAGES_TO_SIZE (Bits);
411 }
412
413 return Result;
414 }
415
416 /**
417 Get bit value in bitmap table for the given address.
418
419 @param[in] Address The address to retrieve for.
420
421 @return 1 or 0.
422 **/
423 UINTN
424 EFIAPI
425 GetGuardMapBit (
426 IN EFI_PHYSICAL_ADDRESS Address
427 )
428 {
429 UINT64 *GuardMap;
430
431 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
432 if (GuardMap != NULL) {
433 if (RShiftU64 (*GuardMap,
434 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
435 return 1;
436 }
437 }
438
439 return 0;
440 }
441
442 /**
443 Set the bit in bitmap table for the given address.
444
445 @param[in] Address The address to set for.
446
447 @return VOID.
448 **/
449 VOID
450 EFIAPI
451 SetGuardMapBit (
452 IN EFI_PHYSICAL_ADDRESS Address
453 )
454 {
455 UINT64 *GuardMap;
456 UINT64 BitMask;
457
458 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
459 if (GuardMap != NULL) {
460 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
461 *GuardMap |= BitMask;
462 }
463 }
464
465 /**
466 Clear the bit in bitmap table for the given address.
467
468 @param[in] Address The address to clear for.
469
470 @return VOID.
471 **/
472 VOID
473 EFIAPI
474 ClearGuardMapBit (
475 IN EFI_PHYSICAL_ADDRESS Address
476 )
477 {
478 UINT64 *GuardMap;
479 UINT64 BitMask;
480
481 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
482 if (GuardMap != NULL) {
483 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
484 *GuardMap &= ~BitMask;
485 }
486 }
487
488 /**
489 Check to see if the page at the given address is a Guard page or not.
490
491 @param[in] Address The address to check for.
492
493 @return TRUE The page at Address is a Guard page.
494 @return FALSE The page at Address is not a Guard page.
495 **/
496 BOOLEAN
497 EFIAPI
498 IsGuardPage (
499 IN EFI_PHYSICAL_ADDRESS Address
500 )
501 {
502 UINTN BitMap;
503
504 //
505 // There must be at least one guarded page before and/or after the given
506 // address if it is a Guard page. The bitmap pattern should be one of
507 // 001, 100 and 101.
508 //
509 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
510 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
511 }
512
513 /**
514 Check to see if the page at the given address is a head Guard page or not.
515
516 @param[in] Address The address to check for
517
518 @return TRUE The page at Address is a head Guard page
519 @return FALSE The page at Address is not a head Guard page
520 **/
521 BOOLEAN
522 EFIAPI
523 IsHeadGuard (
524 IN EFI_PHYSICAL_ADDRESS Address
525 )
526 {
527 return (GetGuardedMemoryBits (Address, 2) == BIT1);
528 }
529
530 /**
531 Check to see if the page at the given address is a tail Guard page or not.
532
533 @param[in] Address The address to check for.
534
535 @return TRUE The page at Address is a tail Guard page.
536 @return FALSE The page at Address is not a tail Guard page.
537 **/
538 BOOLEAN
539 EFIAPI
540 IsTailGuard (
541 IN EFI_PHYSICAL_ADDRESS Address
542 )
543 {
544 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
545 }
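
//
// Illustrative sketch (not part of this module's build): IsGuardPage(),
// IsHeadGuard() and IsTailGuard() above classify a page by inspecting a small
// window of bitmap bits around it, where bit N of the window stands for the
// page N pages above the window start. The hosted ISO C model below spells
// out those patterns with hypothetical names.
//
#if 0
#include <stdbool.h>
#include <stdio.h>

// Window around a page P: Prev = page just below P, Self = P, Next = page
// just above P. A bit value of 1 means "marked as guarded allocation" in the
// bitmap; 0 means free memory or a Guard page.
typedef struct { int Prev; int Self; int Next; } PAGE_WINDOW;

static bool
ModelIsGuardPage (PAGE_WINDOW W)
{
  // Mirrors IsGuardPage(): patterns 001, 100 and 101 (Next Self Prev).
  return (W.Self == 0) && (W.Prev == 1 || W.Next == 1);
}

static bool
ModelIsHeadGuard (PAGE_WINDOW W)
{
  // Mirrors IsHeadGuard(): the page itself is unmarked, the next one is marked.
  return (W.Self == 0) && (W.Next == 1);
}

static bool
ModelIsTailGuard (PAGE_WINDOW W)
{
  // Mirrors IsTailGuard(): the previous page is marked, this one is not.
  return (W.Self == 0) && (W.Prev == 1);
}

int
main (void)
{
  PAGE_WINDOW HeadGuard = { 0, 0, 1 };   // Guard just before a guarded block
  PAGE_WINDOW TailGuard = { 1, 0, 0 };   // Guard just after a guarded block
  PAGE_WINDOW Shared    = { 1, 0, 1 };   // one Guard shared by two blocks
  PAGE_WINDOW InBlock   = { 0, 1, 0 };   // page inside a guarded allocation

  printf ("head  : guard=%d head=%d tail=%d\n", ModelIsGuardPage (HeadGuard),
          ModelIsHeadGuard (HeadGuard), ModelIsTailGuard (HeadGuard));
  printf ("tail  : guard=%d head=%d tail=%d\n", ModelIsGuardPage (TailGuard),
          ModelIsHeadGuard (TailGuard), ModelIsTailGuard (TailGuard));
  printf ("shared: guard=%d head=%d tail=%d\n", ModelIsGuardPage (Shared),
          ModelIsHeadGuard (Shared), ModelIsTailGuard (Shared));
  printf ("block : guard=%d head=%d tail=%d\n", ModelIsGuardPage (InBlock),
          ModelIsHeadGuard (InBlock), ModelIsTailGuard (InBlock));
  return 0;
}
#endif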
546
547 /**
548 Check to see if the page at the given address is guarded or not.
549
550 @param[in] Address The address to check for.
551
552 @return TRUE The page at Address is guarded.
553 @return FALSE The page at Address is not guarded.
554 **/
555 BOOLEAN
556 EFIAPI
557 IsMemoryGuarded (
558 IN EFI_PHYSICAL_ADDRESS Address
559 )
560 {
561 return (GetGuardMapBit (Address) == 1);
562 }
563
564 /**
565 Set the page at the given address to be a Guard page.
566
567 This is done by changing the page table attribute to be NOT PRESENT.
568
569 @param[in] BaseAddress Page address to Guard at
570
571 @return VOID
572 **/
573 VOID
574 EFIAPI
575 SetGuardPage (
576 IN EFI_PHYSICAL_ADDRESS BaseAddress
577 )
578 {
579 //
580 // Set the flag to make sure that memory allocated for the page table
581 // operation is not guarded; otherwise an infinite loop could occur.
582 //
583 mOnGuarding = TRUE;
584 //
585 // Note: This might overwrite other attributes needed by other features,
586 // such as NX memory protection.
587 //
588 gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
589 mOnGuarding = FALSE;
590 }
591
592 /**
593 Unset the Guard page at the given address and return it to normal memory.
594
595 This is done by changing the page table attribute to be PRESENT.
596
597 @param[in] BaseAddress Page address to unset Guard at.
598
599 @return VOID.
600 **/
601 VOID
602 EFIAPI
603 UnsetGuardPage (
604 IN EFI_PHYSICAL_ADDRESS BaseAddress
605 )
606 {
607 UINT64 Attributes;
608
609 //
610 // Once the Guard page is unset, it will be freed back to memory pool. NX
611 // memory protection must be restored for this page if NX is enabled for free
612 // memory.
613 //
614 Attributes = 0;
615 if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
616 Attributes |= EFI_MEMORY_XP;
617 }
618
619 //
620 // Set the flag to make sure that memory allocated for the page table
621 // operation is not guarded; otherwise an infinite loop could occur.
622 //
623 mOnGuarding = TRUE;
624 //
625 // Note: This might overwrite other attributes needed by other features,
626 // such as memory protection (NX). Please make sure they are not enabled
627 // at the same time.
628 //
629 gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
630 mOnGuarding = FALSE;
631 }
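
//
// Illustrative sketch (not part of this module's build): SetGuardPage() and
// UnsetGuardPage() implement the Guard by removing and restoring the present
// mapping through gCpu->SetMemoryAttributes(). The POSIX/Linux-style program
// below is only a user-space analogy for that idea: an inaccessible page on
// each side of a buffer makes an overflow or underflow fault immediately.
// Addresses, sizes and the mprotect() approach are this sketch's own choices,
// not the DXE mechanism.
//
#if 0
#define _DEFAULT_SOURCE            // for MAP_ANONYMOUS on glibc
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int
main (void)
{
  long           PageSize = sysconf (_SC_PAGESIZE);
  size_t         Length   = 4 * (size_t)PageSize;  // [guard][data][data][guard]
  unsigned char  *Base;

  Base = mmap (NULL, Length, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (Base == MAP_FAILED) {
    return EXIT_FAILURE;
  }

  // "SetGuardPage": make the head and tail pages inaccessible.
  mprotect (Base, (size_t)PageSize, PROT_NONE);
  mprotect (Base + 3 * PageSize, (size_t)PageSize, PROT_NONE);

  Base[PageSize] = 0xA5;           // access inside the data pages: fine
  // Base[0] = 0xA5;               // would fault: head guard page

  // "UnsetGuardPage": restore normal access before releasing the memory.
  mprotect (Base, (size_t)PageSize, PROT_READ | PROT_WRITE);
  mprotect (Base + 3 * PageSize, (size_t)PageSize, PROT_READ | PROT_WRITE);

  munmap (Base, Length);
  puts ("guarded buffer exercised without faulting");
  return EXIT_SUCCESS;
}
#endif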
632
633 /**
634 Check to see if the given type of memory should be guarded or not.
635
636 @param[in] MemoryType Memory type to check.
637 @param[in] AllocateType Allocation type to check.
638 @param[in] PageOrPool Indicate a page allocation or pool allocation.
639
640
641 @return TRUE The given type of memory should be guarded.
642 @return FALSE The given type of memory should not be guarded.
643 **/
644 BOOLEAN
645 IsMemoryTypeToGuard (
646 IN EFI_MEMORY_TYPE MemoryType,
647 IN EFI_ALLOCATE_TYPE AllocateType,
648 IN UINT8 PageOrPool
649 )
650 {
651 UINT64 TestBit;
652 UINT64 ConfigBit;
653 BOOLEAN InSmm;
654
655 if (gCpu == NULL || AllocateType == AllocateAddress) {
656 return FALSE;
657 }
658
659 InSmm = FALSE;
660 if (gSmmBase2 != NULL) {
661 gSmmBase2->InSmm (gSmmBase2, &InSmm);
662 }
663
664 if (InSmm) {
665 return FALSE;
666 }
667
668 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
669 return FALSE;
670 }
671
672 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
673 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
674 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
675 ConfigBit = PcdGet64 (PcdHeapGuardPageType);
676 } else {
677 ConfigBit = (UINT64)-1;
678 }
679
680 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
681 TestBit = BIT63;
682 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
683 TestBit = BIT62;
684 } else if (MemoryType < EfiMaxMemoryType) {
685 TestBit = LShiftU64 (1, MemoryType);
686 } else if (MemoryType == EfiMaxMemoryType) {
687 TestBit = (UINT64)-1;
688 } else {
689 TestBit = 0;
690 }
691
692 return ((ConfigBit & TestBit) != 0);
693 }
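
//
// Illustrative sketch (not part of this module's build): PcdHeapGuardPageType
// and PcdHeapGuardPoolType are bit masks indexed by EFI_MEMORY_TYPE, and
// IsMemoryTypeToGuard() tests one bit per requested type. The hosted ISO C
// model below shows that selection for EfiBootServicesData (type value 4);
// the reserved-range thresholds and the EfiMaxMemoryType value are this
// sketch's stand-ins for the real macros and enum.
//
#if 0
#include <stdint.h>
#include <stdio.h>

enum { EfiBootServicesCode = 3, EfiBootServicesData = 4, EfiMaxMemoryType = 15 };

#define BIT62  (1ULL << 62)
#define BIT63  (1ULL << 63)

// Mirrors the TestBit selection in IsMemoryTypeToGuard().
static uint64_t
MemoryTypeToTestBit (uint32_t MemoryType)
{
  if (MemoryType >= 0x80000000U) {           // OS reserved range
    return BIT63;
  } else if (MemoryType >= 0x70000000U) {    // OEM reserved range
    return BIT62;
  } else if (MemoryType < EfiMaxMemoryType) {
    return 1ULL << MemoryType;               // one bit per standard type
  } else if (MemoryType == EfiMaxMemoryType) {
    return ~0ULL;                            // treated as "any type"
  }
  return 0;
}

int
main (void)
{
  // A policy mask with bit 4 set asks for EfiBootServicesData to be guarded.
  uint64_t ConfigBit = 1ULL << EfiBootServicesData;

  printf ("guard BootServicesData pages: %d\n",
          (ConfigBit & MemoryTypeToTestBit (EfiBootServicesData)) != 0);
  printf ("guard BootServicesCode pages: %d\n",
          (ConfigBit & MemoryTypeToTestBit (EfiBootServicesCode)) != 0);
  return 0;
}
#endif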
694
695 /**
696 Check to see if the given type of pool should be guarded or not.
697
698 @param[in] MemoryType Pool type to check.
699
700
701 @return TRUE The given type of pool should be guarded.
702 @return FALSE The given type of pool should not be guarded.
703 **/
704 BOOLEAN
705 IsPoolTypeToGuard (
706 IN EFI_MEMORY_TYPE MemoryType
707 )
708 {
709 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
710 GUARD_HEAP_TYPE_POOL);
711 }
712
713 /**
714 Check to see if the given type of page allocation should be guarded or not.
715
716 @param[in] MemoryType Page type to check.
717 @param[in] AllocateType Allocation type to check.
718
719 @return TRUE The given type of page should be guarded.
720 @return FALSE The given type of page should not be guarded.
721 **/
722 BOOLEAN
723 IsPageTypeToGuard (
724 IN EFI_MEMORY_TYPE MemoryType,
725 IN EFI_ALLOCATE_TYPE AllocateType
726 )
727 {
728 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
729 }
730
731 /**
732 Check to see if the heap guard is enabled for page and/or pool allocation.
733
734 @return TRUE/FALSE.
735 **/
736 BOOLEAN
737 IsHeapGuardEnabled (
738 VOID
739 )
740 {
741 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
742 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
743 }
744
745 /**
746 Set head Guard and tail Guard for the given memory range.
747
748 @param[in] Memory Base address of memory to set guard for.
749 @param[in] NumberOfPages Memory size in pages.
750
751 @return VOID
752 **/
753 VOID
754 SetGuardForMemory (
755 IN EFI_PHYSICAL_ADDRESS Memory,
756 IN UINTN NumberOfPages
757 )
758 {
759 EFI_PHYSICAL_ADDRESS GuardPage;
760
761 //
762 // Set tail Guard
763 //
764 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
765 if (!IsGuardPage (GuardPage)) {
766 SetGuardPage (GuardPage);
767 }
768
769 // Set head Guard
770 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
771 if (!IsGuardPage (GuardPage)) {
772 SetGuardPage (GuardPage);
773 }
774
775 //
776 // Mark the memory range as Guarded
777 //
778 SetGuardedMemoryBits (Memory, NumberOfPages);
779 }
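
//
// Illustrative sketch (not part of this module's build): after
// SetGuardForMemory() the layout is one head Guard page, the allocated data
// pages, then one tail Guard page, with a Guard shared with the neighboring
// block when one already exists. The address and page count below are
// hypothetical; only the arithmetic matters.
//
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  0x1000ULL

int
main (void)
{
  uint64_t Memory        = 0x7F6D4000ULL;   // hypothetical allocation base
  uint64_t NumberOfPages = 4;

  uint64_t HeadGuard = Memory - PAGE_SIZE;                   // page below
  uint64_t TailGuard = Memory + NumberOfPages * PAGE_SIZE;   // page above

  printf ("head Guard : %#" PRIx64 "\n", HeadGuard);
  printf ("data pages : %#" PRIx64 " .. %#" PRIx64 "\n", Memory, TailGuard - 1);
  printf ("tail Guard : %#" PRIx64 "\n", TailGuard);
  return 0;
}
#endif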
780
781 /**
782 Unset head Guard and tail Guard for the given memory range.
783
784 @param[in] Memory Base address of memory to unset guard for.
785 @param[in] NumberOfPages Memory size in pages.
786
787 @return VOID
788 **/
789 VOID
790 UnsetGuardForMemory (
791 IN EFI_PHYSICAL_ADDRESS Memory,
792 IN UINTN NumberOfPages
793 )
794 {
795 EFI_PHYSICAL_ADDRESS GuardPage;
796 UINT64 GuardBitmap;
797
798 if (NumberOfPages == 0) {
799 return;
800 }
801
802 //
803 // Head Guard must be one page before, if any.
804 //
805 // MSB-> 1 0 <-LSB
806 // -------------------
807 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
808 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
809 // 1 X -> Don't free first page (need a new Guard)
810 // (it'll be turned into a Guard page later)
811 // -------------------
812 // Start -> -1 -2
813 //
814 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
815 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
816 if ((GuardBitmap & BIT1) == 0) {
817 //
818 // Head Guard exists.
819 //
820 if ((GuardBitmap & BIT0) == 0) {
821 //
822 // If the head Guard is not a tail Guard of adjacent memory block,
823 // unset it.
824 //
825 UnsetGuardPage (GuardPage);
826 }
827 } else {
828 //
829 // Pages before memory to free are still in Guard. It's a partial free
830 // case. Turn first page of memory block to free into a new Guard.
831 //
832 SetGuardPage (Memory);
833 }
834
835 //
836 // Tail Guard must be the page after this memory block to free, if any.
837 //
838 // MSB-> 1 0 <-LSB
839 // --------------------
840 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
841 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
842 // X 1 -> Don't free last page (need a new Guard)
843 // (it'll be turned into a Guard page later)
844 // --------------------
845 // +1 +0 <- End
846 //
847 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
848 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
849 if ((GuardBitmap & BIT0) == 0) {
850 //
851 // Tail Guard exists.
852 //
853 if ((GuardBitmap & BIT1) == 0) {
854 //
855 // If the tail Guard is not a head Guard of adjacent memory block,
856 // free it; otherwise, keep it.
857 //
858 UnsetGuardPage (GuardPage);
859 }
860 } else {
861 //
862 // Pages after memory to free are still in Guard. It's a partial free
863 // case. We need to keep one page to be a head Guard.
864 //
865 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
866 }
867
868 //
869 // No matter what, we just clear the mark of the Guarded memory.
870 //
871 ClearGuardedMemoryBits (Memory, NumberOfPages);
872 }
873
874 /**
875 Adjust address of free memory according to existing and/or required Guard.
876
877 This function will check whether there are existing Guard pages of adjacent
878 memory blocks, and try to use them as the Guard pages of the memory to be
879 allocated.
880
881 @param[in] Start Start address of free memory block.
882 @param[in] Size Size of free memory block.
883 @param[in] SizeRequested Size of memory to allocate.
884
885 @return The end address of memory block found.
886 @return 0 if there is not enough space for the required size of memory and its Guard.
887 **/
888 UINT64
889 AdjustMemoryS (
890 IN UINT64 Start,
891 IN UINT64 Size,
892 IN UINT64 SizeRequested
893 )
894 {
895 UINT64 Target;
896
897 //
898 // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
899 // indicated to put the pool near the Tail Guard, we need extra bytes to
900 // make sure alignment of the returned pool address.
901 //
902 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
903 SizeRequested = ALIGN_VALUE(SizeRequested, 8);
904 }
905
906 Target = Start + Size - SizeRequested;
907 ASSERT (Target >= Start);
908 if (Target == 0) {
909 return 0;
910 }
911
912 if (!IsGuardPage (Start + Size)) {
913 // No Guard at tail to share. One more page is needed.
914 Target -= EFI_PAGES_TO_SIZE (1);
915 }
916
917 // Out of range?
918 if (Target < Start) {
919 return 0;
920 }
921
922 // At the edge?
923 if (Target == Start) {
924 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
925 // Not enough space for a new head Guard if there is no Guard at head to share.
926 return 0;
927 }
928 }
929
930 // OK, we have enough pages for memory and its Guards. Return the End of the
931 // free space.
932 return Target + SizeRequested - 1;
933 }
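
//
// Illustrative sketch (not part of this module's build): AdjustMemoryS()
// places the allocation at the top of the free block and only spends extra
// pages on Guards that cannot be shared with the neighboring blocks. The
// hosted ISO C model below reproduces that computation; the GuardAbove and
// GuardBelow flags, names and addresses are this sketch's assumptions.
//
#if 0
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  0x1000ULL

// Returns the end address of the range to use, or 0 if the free block cannot
// hold the request plus whatever Guard pages it still needs.
static uint64_t
ModelAdjustMemoryS (uint64_t Start, uint64_t Size, uint64_t SizeRequested,
                    bool GuardAbove, bool GuardBelow)
{
  uint64_t Target = Start + Size - SizeRequested;

  if (!GuardAbove) {
    Target -= PAGE_SIZE;               // need a fresh tail Guard in the block
  }
  if (Target < Start) {
    return 0;                          // block too small for data plus Guards
  }
  if (Target == Start && !GuardBelow) {
    return 0;                          // no page left over for a head Guard
  }
  return Target + SizeRequested - 1;   // end address of the usable range
}

int
main (void)
{
  // An 8-page free block at 0x100000 with 4 pages requested and no Guards to
  // share: the usable range ends at 0x106FFF, leaving 0x107000 for a new tail
  // Guard and 0x102000 free to become the head Guard.
  uint64_t End = ModelAdjustMemoryS (0x100000ULL, 8 * PAGE_SIZE,
                                     4 * PAGE_SIZE, false, false);

  printf ("end of usable range: %#" PRIx64 "\n", End);
  return 0;
}
#endif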
934
935 /**
936 Adjust the start address and number of pages to free according to Guard.
937
938 The purpose of this function is to keep the Guard page shared with an adjacent
939 memory block if that block is still guarded, or to free it if no longer shared.
940 Another purpose is to reserve pages as Guard pages in a partial-free situation.
941
942 @param[in,out] Memory Base address of memory to free.
943 @param[in,out] NumberOfPages Size of memory to free.
944
945 @return VOID.
946 **/
947 VOID
948 AdjustMemoryF (
949 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
950 IN OUT UINTN *NumberOfPages
951 )
952 {
953 EFI_PHYSICAL_ADDRESS Start;
954 EFI_PHYSICAL_ADDRESS MemoryToTest;
955 UINTN PagesToFree;
956 UINT64 GuardBitmap;
957
958 if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
959 return;
960 }
961
962 Start = *Memory;
963 PagesToFree = *NumberOfPages;
964
965 //
966 // Head Guard must be one page before, if any.
967 //
968 // MSB-> 1 0 <-LSB
969 // -------------------
970 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
971 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
972 // 1 X -> Don't free first page (need a new Guard)
973 // (it'll be turned into a Guard page later)
974 // -------------------
975 // Start -> -1 -2
976 //
977 MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
978 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
979 if ((GuardBitmap & BIT1) == 0) {
980 //
981 // Head Guard exists.
982 //
983 if ((GuardBitmap & BIT0) == 0) {
984 //
985 // If the head Guard is not a tail Guard of adjacent memory block,
986 // free it; otherwise, keep it.
987 //
988 Start -= EFI_PAGES_TO_SIZE (1);
989 PagesToFree += 1;
990 }
991 } else {
992 //
993 // No Head Guard, and pages before memory to free are still in Guard. It's a
994 // partial free case. We need to keep one page to be a tail Guard.
995 //
996 Start += EFI_PAGES_TO_SIZE (1);
997 PagesToFree -= 1;
998 }
999
1000 //
1001 // Tail Guard must be the page after this memory block to free, if any.
1002 //
1003 // MSB-> 1 0 <-LSB
1004 // --------------------
1005 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
1006 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
1007 // X 1 -> Don't free last page (need a new Guard)
1008 // (it'll be turned into a Guard page later)
1009 // --------------------
1010 // +1 +0 <- End
1011 //
1012 MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
1013 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
1014 if ((GuardBitmap & BIT0) == 0) {
1015 //
1016 // Tail Guard exists.
1017 //
1018 if ((GuardBitmap & BIT1) == 0) {
1019 //
1020 // If the tail Guard is not a head Guard of adjacent memory block,
1021 // free it; otherwise, keep it.
1022 //
1023 PagesToFree += 1;
1024 }
1025 } else if (PagesToFree > 0) {
1026 //
1027 // No Tail Guard, and pages after memory to free are still in Guard. It's a
1028 // partial free case. We need to keep one page to be a head Guard.
1029 //
1030 PagesToFree -= 1;
1031 }
1032
1033 *Memory = Start;
1034 *NumberOfPages = PagesToFree;
1035 }
1036
1037 /**
1038 Adjust the base address and number of pages to actually allocate, according to the Guard.
1039
1040 @param[in,out] Memory Base address of free memory.
1041 @param[in,out] NumberOfPages Size of memory to allocate.
1042
1043 @return VOID.
1044 **/
1045 VOID
1046 AdjustMemoryA (
1047 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
1048 IN OUT UINTN *NumberOfPages
1049 )
1050 {
1051 //
1052 // FindFreePages() has already taken the Guard into account. It's safe to
1053 // adjust the start address and/or number of pages here, to make sure that
1054 // the Guards are also "allocated".
1055 //
1056 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
1057 // No tail Guard, add one.
1058 *NumberOfPages += 1;
1059 }
1060
1061 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
1062 // No head Guard, add one.
1063 *Memory -= EFI_PAGE_SIZE;
1064 *NumberOfPages += 1;
1065 }
1066 }
1067
1068 /**
1069 Adjust the pool head position to make sure the Guard page is adjacent to the
1070 pool tail or pool head.
1071
1072 @param[in] Memory Base address of memory allocated.
1073 @param[in] NoPages Number of pages actually allocated.
1074 @param[in] Size Size of memory requested.
1075 (plus pool head/tail overhead)
1076
1077 @return Address of pool head.
1078 **/
1079 VOID *
1080 AdjustPoolHeadA (
1081 IN EFI_PHYSICAL_ADDRESS Memory,
1082 IN UINTN NoPages,
1083 IN UINTN Size
1084 )
1085 {
1086 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1087 //
1088 // Pool head is put near the head Guard
1089 //
1090 return (VOID *)(UINTN)Memory;
1091 }
1092
1093 //
1094 // Pool head is put near the tail Guard
1095 //
1096 Size = ALIGN_VALUE (Size, 8);
1097 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1098 }
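
//
// Illustrative sketch (not part of this module's build): when BIT7 of
// PcdHeapGuardPropertyMask is clear, AdjustPoolHeadA() pushes the pool buffer
// toward the tail Guard so an overrun faults immediately, while the returned
// address stays 8-byte aligned as the UEFI specification requires for pool.
// The numbers below are hypothetical.
//
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  0x1000ULL

// Align Value up to the next multiple of Alignment (a power of two).
static uint64_t
AlignUp (uint64_t Value, uint64_t Alignment)
{
  return (Value + Alignment - 1) & ~(Alignment - 1);
}

int
main (void)
{
  uint64_t Memory  = 0x7F6D4000ULL;   // page base returned by the allocator
  uint64_t NoPages = 1;               // pages backing the pool allocation
  uint64_t Size    = 100;             // requested size plus pool overhead

  // Mirrors the "pool head near the tail Guard" branch of AdjustPoolHeadA().
  uint64_t Head = Memory + NoPages * PAGE_SIZE - AlignUp (Size, 8);

  printf ("page base : %#" PRIx64 "\n", Memory);
  printf ("pool head : %#" PRIx64 "\n", Head);
  printf ("tail Guard: %#" PRIx64 "\n", Memory + NoPages * PAGE_SIZE);
  return 0;
}
#endif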
1099
1100 /**
1101 Get the page base address according to pool head address.
1102
1103 @param[in] Memory Head address of pool to free.
1104
1105 @return Page base address of the pool to free.
1106 **/
1107 VOID *
1108 AdjustPoolHeadF (
1109 IN EFI_PHYSICAL_ADDRESS Memory
1110 )
1111 {
1112 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1113 //
1114 // Pool head is put near the head Guard
1115 //
1116 return (VOID *)(UINTN)Memory;
1117 }
1118
1119 //
1120 // Pool head is put near the tail Guard
1121 //
1122 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1123 }
1124
1125 /**
1126 Allocate or free guarded memory.
1127
1128 @param[in] Start Start address of memory to allocate or free.
1129 @param[in] NumberOfPages Memory size in pages.
1130 @param[in] NewType Memory type to convert to.
1131
1132 @return EFI_SUCCESS, or the status returned by CoreConvertPages().
1133 **/
1134 EFI_STATUS
1135 CoreConvertPagesWithGuard (
1136 IN UINT64 Start,
1137 IN UINTN NumberOfPages,
1138 IN EFI_MEMORY_TYPE NewType
1139 )
1140 {
1141 UINT64 OldStart;
1142 UINTN OldPages;
1143
1144 if (NewType == EfiConventionalMemory) {
1145 OldStart = Start;
1146 OldPages = NumberOfPages;
1147
1148 AdjustMemoryF (&Start, &NumberOfPages);
1149 //
1150 // It's safe to unset Guard page inside memory lock because there should
1151 // be no memory allocation occurred in updating memory page attribute at
1152 // this point. And unsetting Guard page before free will prevent Guard
1153 // page just freed back to pool from being allocated right away before
1154 // marking it usable (from non-present to present).
1155 //
1156 UnsetGuardForMemory (OldStart, OldPages);
1157 if (NumberOfPages == 0) {
1158 return EFI_SUCCESS;
1159 }
1160 } else {
1161 AdjustMemoryA (&Start, &NumberOfPages);
1162 }
1163
1164 return CoreConvertPages (Start, NumberOfPages, NewType);
1165 }
1166
1167 /**
1168 Helper function to convert a UINT64 value to its binary string representation.
1169
1170 @param[in] Value Value of a UINT64 integer.
1171 @param[out] BinString String buffer to contain the conversion result.
1172
1173 @return VOID.
1174 **/
1175 VOID
1176 Uint64ToBinString (
1177 IN UINT64 Value,
1178 OUT CHAR8 *BinString
1179 )
1180 {
1181 UINTN Index;
1182
1183 if (BinString == NULL) {
1184 return;
1185 }
1186
1187 for (Index = 64; Index > 0; --Index) {
1188 BinString[Index - 1] = '0' + (Value & 1);
1189 Value = RShiftU64 (Value, 1);
1190 }
1191 BinString[64] = '\0';
1192 }
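
//
// Illustrative usage sketch (hypothetical caller, not built here): the
// destination buffer must hold 64 digits plus the terminator, and the most
// significant bit comes first in the resulting string.
//
#if 0
  CHAR8  BinString[GUARDED_HEAP_MAP_ENTRY_BITS + 1];

  Uint64ToBinString (0x0000000000000005ULL, BinString);
  // BinString is now "0000...000101": bit 63 first, bit 0 last.
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%a\r\n", BinString));
#endif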
1193
1194 /**
1195 Dump the guarded memory bit map.
1196 **/
1197 VOID
1198 EFIAPI
1199 DumpGuardedMemoryBitmap (
1200 VOID
1201 )
1202 {
1203 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1204 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1205 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1206 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1207 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1208 UINT64 TableEntry;
1209 UINT64 Address;
1210 INTN Level;
1211 UINTN RepeatZero;
1212 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
1213 CHAR8 *Ruler1;
1214 CHAR8 *Ruler2;
1215
1216 if (mGuardedMemoryMap == 0 ||
1217 mMapLevel == 0 ||
1218 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1219 return;
1220 }
1221
1222 Ruler1 = " 3 2 1 0";
1223 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
1224
1225 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
1226 " Guarded Memory Bitmap "
1227 "==============================\r\n"));
1228 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
1229 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
1230
1231 CopyMem (Entries, mLevelMask, sizeof (Entries));
1232 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1233
1234 SetMem (Indices, sizeof(Indices), 0);
1235 SetMem (Tables, sizeof(Tables), 0);
1236 SetMem (Addresses, sizeof(Addresses), 0);
1237
1238 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1239 Tables[Level] = mGuardedMemoryMap;
1240 Address = 0;
1241 RepeatZero = 0;
1242
1243 while (TRUE) {
1244 if (Indices[Level] > Entries[Level]) {
1245
1246 Tables[Level] = 0;
1247 Level -= 1;
1248 RepeatZero = 0;
1249
1250 DEBUG ((
1251 HEAP_GUARD_DEBUG_LEVEL,
1252 "========================================="
1253 "=========================================\r\n"
1254 ));
1255
1256 } else {
1257
1258 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
1259 Address = Addresses[Level];
1260
1261 if (TableEntry == 0) {
1262
1263 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1264 if (RepeatZero == 0) {
1265 Uint64ToBinString(TableEntry, String);
1266 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1267 } else if (RepeatZero == 1) {
1268 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
1269 }
1270 RepeatZero += 1;
1271 }
1272
1273 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1274
1275 Level += 1;
1276 Tables[Level] = TableEntry;
1277 Addresses[Level] = Address;
1278 Indices[Level] = 0;
1279 RepeatZero = 0;
1280
1281 continue;
1282
1283 } else {
1284
1285 RepeatZero = 0;
1286 Uint64ToBinString(TableEntry, String);
1287 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1288
1289 }
1290 }
1291
1292 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1293 break;
1294 }
1295
1296 Indices[Level] += 1;
1297 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1298 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1299
1300 }
1301 }
1302