MdeModulePkg/Core: fix bits operation error on a boundary condition
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "DxeMain.h"
16 #include "Imem.h"
17 #include "HeapGuard.h"
18
//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocate pages for new PDE/PTE.
// While TRUE, page/pool allocations skip the Guard logic entirely.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Root entry of the multi-level table tracking the Guarded memory with
// bitmap, in which '1' is used to indicate memory guarded. '0' might be
// free memory or Guard page itself, depending on status of memory
// adjacent to it. Non-zero means the table has been allocated.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized at least by 1. It will be automatically
// updated (grown) according to the address of memory just tracked; see
// FindGuardedMemoryMap().
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table, used to decode an address
// into per-level table indices (page-table style walk).
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
46
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  //
  // Bit offset of Address inside the first 64-bit map entry, and the bit
  // offset of the last bit of the run inside its own entry.
  //
  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // The run reaches (or crosses) the end of the first entry. Split it into:
    // Msbs bits finishing the first entry, Qwords whole 64-bit entries, and
    // Lsbs trailing bits in the last entry. Note the ">=" and "%" here: when
    // StartBit is 0 and BitNumber is a multiple of 64, Msbs/Lsbs must both
    // collapse to 0 so the run is handled entirely by SetMem64 below. A
    // plain ">" would take the else branch with Msbs == 64 and attempt
    // LShiftU64 (1, 64), which is an invalid (out-of-range) shift.
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // The whole run fits strictly inside the first map entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    //
    // OR-in the partial mask for the head entry.
    //
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    //
    // Fill the whole middle entries with all-ones in one shot.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    //
    // OR-in the partial mask for the tail entry.
    //
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
99
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  //
  // Same head/middle/tail decomposition as SetBits(), only the bit
  // operation differs (AND with inverted mask instead of OR).
  //
  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // ">=" plus "%" force Msbs to 0 when the run starts at bit 0 and spans
    // whole entries, so LShiftU64 is never called with a shift of 64
    // (invalid). The whole-entry part is cleared by SetMem64 below.
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // The whole run fits strictly inside the first map entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    //
    // Clear the partial bits in the head entry.
    //
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    //
    // Zero the whole middle entries in one shot.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    //
    // Clear the partial bits in the tail entry.
    //
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
151
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address    Start address to retrieve bits for.
  @param[in]  BitNumber  Number of bits to get.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  //
  // The result must fit in one UINT64, so the run can span at most two
  // adjacent map entries (no whole-entry middle part as in SetBits).
  //
  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // The run crosses into the next entry: Msbs bits from the first entry,
    // Lsbs bits from the second.
    //
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Whole-entry read. Must be special-cased: the masking expression below
    // would need LShiftU64 (1, 64), which is an invalid shift.
    //
    Result = *BitMap;
  } else {
    Result    = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
    if (Lsbs > 0) {
      //
      // Splice the low bits of the next entry above the Msbs bits.
      //
      BitMap  += 1;
      Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
203
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  @param[in]   Address       Start address to search the bitmap for.
  @param[in]   AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out]  BitMap        Pointer to bitmap which covers the Address.
                             Set to NULL if the covering map unit does not
                             exist and AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN BOOLEAN                 AllocMapUnit,
  OUT UINT64                 **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;
  EFI_STATUS              Status;

  //
  // Adjust current map table depth according to the address to access.
  // If Address has set bits above what the current depth can decode, grow
  // the tree by adding new root levels; the old root becomes entry 0 of
  // each newly inserted level.
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      //
      // Allocate a new root table and hook the current root under its
      // first entry (index 0 covers the old, lower address range).
      // CoreInternalAllocatePages is called with NeedGuard == FALSE to
      // avoid re-entering the guard logic.
      //
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                  AllocateAnyPages,
                  EfiBootServicesData,
                  EFI_SIZE_TO_PAGES (Size),
                  &MapMemory,
                  FALSE
                  );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk the table from the (possibly new) root down to the leaf map unit,
  // allocating intermediate tables on demand when AllocMapUnit is TRUE.
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        //
        // Lookup-only mode: report "not tracked" via NULL.
        //
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                  AllocateAnyPages,
                  EfiBootServicesData,
                  EFI_SIZE_TO_PAGES (Size),
                  &MapMemory,
                  FALSE
                  );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    //
    // Decode this level's index from the address and descend.
    //
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index     &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  //
  // Number of bits left in the leaf unit from Address onward; callers use
  // this to chunk multi-unit operations.
  //
  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}
299
300 /**
301 Set corresponding bits in bitmap table to 1 according to given memory range.
302
303 @param[in] Address Memory address to guard from.
304 @param[in] NumberOfPages Number of pages to guard.
305
306 @return VOID.
307 **/
308 VOID
309 EFIAPI
310 SetGuardedMemoryBits (
311 IN EFI_PHYSICAL_ADDRESS Address,
312 IN UINTN NumberOfPages
313 )
314 {
315 UINT64 *BitMap;
316 UINTN Bits;
317 UINTN BitsToUnitEnd;
318
319 while (NumberOfPages > 0) {
320 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
321 ASSERT (BitMap != NULL);
322
323 if (NumberOfPages > BitsToUnitEnd) {
324 // Cross map unit
325 Bits = BitsToUnitEnd;
326 } else {
327 Bits = NumberOfPages;
328 }
329
330 SetBits (Address, Bits, BitMap);
331
332 NumberOfPages -= Bits;
333 Address += EFI_PAGES_TO_SIZE (Bits);
334 }
335 }
336
337 /**
338 Clear corresponding bits in bitmap table according to given memory range.
339
340 @param[in] Address Memory address to unset from.
341 @param[in] NumberOfPages Number of pages to unset guard.
342
343 @return VOID.
344 **/
345 VOID
346 EFIAPI
347 ClearGuardedMemoryBits (
348 IN EFI_PHYSICAL_ADDRESS Address,
349 IN UINTN NumberOfPages
350 )
351 {
352 UINT64 *BitMap;
353 UINTN Bits;
354 UINTN BitsToUnitEnd;
355
356 while (NumberOfPages > 0) {
357 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
358 ASSERT (BitMap != NULL);
359
360 if (NumberOfPages > BitsToUnitEnd) {
361 // Cross map unit
362 Bits = BitsToUnitEnd;
363 } else {
364 Bits = NumberOfPages;
365 }
366
367 ClearBits (Address, Bits, BitMap);
368
369 NumberOfPages -= Bits;
370 Address += EFI_PAGES_TO_SIZE (Bits);
371 }
372 }
373
/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to retrieve from.
  @param[in]  NumberOfPages  Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINTN
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   NumberOfPages
  )
{
  UINT64            *BitMap;
  UINTN             Bits;
  UINTN             Result;
  UINTN             Shift;
  UINTN             BitsToUnitEnd;

  //
  // NOTE(review): Result is UINTN while up to 64 bits may be gathered; on a
  // 32-bit build a request of more than 32 pages would truncate. All callers
  // in this file request at most 3 pages, so this is currently harmless —
  // confirm before widening any caller.
  //
  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift  = 0;
  while (NumberOfPages > 0) {
    //
    // Lookup only (AllocMapUnit == FALSE): an absent map unit means "not
    // guarded", contributing zero bits.
    //
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits  = BitsToUnitEnd;
    } else {
      Bits  = NumberOfPages;
    }

    if (BitMap != NULL) {
      //
      // Accumulate this unit's bits at the running bit position.
      //
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift         += Bits;
    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}
419
420 /**
421 Get bit value in bitmap table for the given address.
422
423 @param[in] Address The address to retrieve for.
424
425 @return 1 or 0.
426 **/
427 UINTN
428 EFIAPI
429 GetGuardMapBit (
430 IN EFI_PHYSICAL_ADDRESS Address
431 )
432 {
433 UINT64 *GuardMap;
434
435 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
436 if (GuardMap != NULL) {
437 if (RShiftU64 (*GuardMap,
438 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
439 return 1;
440 }
441 }
442
443 return 0;
444 }
445
446 /**
447 Set the bit in bitmap table for the given address.
448
449 @param[in] Address The address to set for.
450
451 @return VOID.
452 **/
453 VOID
454 EFIAPI
455 SetGuardMapBit (
456 IN EFI_PHYSICAL_ADDRESS Address
457 )
458 {
459 UINT64 *GuardMap;
460 UINT64 BitMask;
461
462 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
463 if (GuardMap != NULL) {
464 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
465 *GuardMap |= BitMask;
466 }
467 }
468
469 /**
470 Clear the bit in bitmap table for the given address.
471
472 @param[in] Address The address to clear for.
473
474 @return VOID.
475 **/
476 VOID
477 EFIAPI
478 ClearGuardMapBit (
479 IN EFI_PHYSICAL_ADDRESS Address
480 )
481 {
482 UINT64 *GuardMap;
483 UINT64 BitMask;
484
485 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
486 if (GuardMap != NULL) {
487 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
488 *GuardMap &= ~BitMask;
489 }
490 }
491
492 /**
493 Check to see if the page at the given address is a Guard page or not.
494
495 @param[in] Address The address to check for.
496
497 @return TRUE The page at Address is a Guard page.
498 @return FALSE The page at Address is not a Guard page.
499 **/
500 BOOLEAN
501 EFIAPI
502 IsGuardPage (
503 IN EFI_PHYSICAL_ADDRESS Address
504 )
505 {
506 UINTN BitMap;
507
508 //
509 // There must be at least one guarded page before and/or after given
510 // address if it's a Guard page. The bitmap pattern should be one of
511 // 001, 100 and 101
512 //
513 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
514 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
515 }
516
517 /**
518 Check to see if the page at the given address is a head Guard page or not.
519
520 @param[in] Address The address to check for
521
522 @return TRUE The page at Address is a head Guard page
523 @return FALSE The page at Address is not a head Guard page
524 **/
525 BOOLEAN
526 EFIAPI
527 IsHeadGuard (
528 IN EFI_PHYSICAL_ADDRESS Address
529 )
530 {
531 return (GetGuardedMemoryBits (Address, 2) == BIT1);
532 }
533
534 /**
535 Check to see if the page at the given address is a tail Guard page or not.
536
537 @param[in] Address The address to check for.
538
539 @return TRUE The page at Address is a tail Guard page.
540 @return FALSE The page at Address is not a tail Guard page.
541 **/
542 BOOLEAN
543 EFIAPI
544 IsTailGuard (
545 IN EFI_PHYSICAL_ADDRESS Address
546 )
547 {
548 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
549 }
550
551 /**
552 Check to see if the page at the given address is guarded or not.
553
554 @param[in] Address The address to check for.
555
556 @return TRUE The page at Address is guarded.
557 @return FALSE The page at Address is not guarded.
558 **/
559 BOOLEAN
560 EFIAPI
561 IsMemoryGuarded (
562 IN EFI_PHYSICAL_ADDRESS Address
563 )
564 {
565 return (GetGuardMapBit (Address) == 1);
566 }
567
/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRSENT.
  No-op until the CPU Arch Protocol (gCpu) is available; such pages are
  caught up later by SetAllGuardPages().

  @param[in]  BaseAddress  Page address to Guard at

  @return VOID
**/
VOID
EFIAPI
SetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  if (gCpu == NULL) {
    return;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused. The flag must
  // bracket the SetMemoryAttributes call, which may allocate PDE/PTE pages.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as NX memory protection.
  //
  gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
  mOnGuarding = FALSE;
}
599
/**
  Unset the Guard page at the given address to the normal memory.

  This is done by changing the page table attribute to be PRSENT.
  No-op until the CPU Arch Protocol (gCpu) is available.

  @param[in]  BaseAddress  Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  UINT64          Attributes;

  if (gCpu == NULL) {
    return;
  }

  //
  // Once the Guard page is unset, it will be freed back to memory pool. NX
  // memory protection must be restored for this page if NX is enabled for free
  // memory (i.e. the EfiConventionalMemory bit is set in the NX policy PCD).
  //
  Attributes = 0;
  if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
    Attributes |= EFI_MEMORY_XP;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused. The flag must
  // bracket the SetMemoryAttributes call, which may allocate PDE/PTE pages.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as memory protection (NX). Please make sure they are not enabled
  // at the same time.
  //
  gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
  mOnGuarding = FALSE;
}
644
645 /**
646 Check to see if the memory at the given address should be guarded or not.
647
648 @param[in] MemoryType Memory type to check.
649 @param[in] AllocateType Allocation type to check.
650 @param[in] PageOrPool Indicate a page allocation or pool allocation.
651
652
653 @return TRUE The given type of memory should be guarded.
654 @return FALSE The given type of memory should not be guarded.
655 **/
656 BOOLEAN
657 IsMemoryTypeToGuard (
658 IN EFI_MEMORY_TYPE MemoryType,
659 IN EFI_ALLOCATE_TYPE AllocateType,
660 IN UINT8 PageOrPool
661 )
662 {
663 UINT64 TestBit;
664 UINT64 ConfigBit;
665 BOOLEAN InSmm;
666
667 if (AllocateType == AllocateAddress) {
668 return FALSE;
669 }
670
671 InSmm = FALSE;
672 if (gSmmBase2 != NULL) {
673 gSmmBase2->InSmm (gSmmBase2, &InSmm);
674 }
675
676 if (InSmm) {
677 return FALSE;
678 }
679
680 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
681 return FALSE;
682 }
683
684 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
685 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
686 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
687 ConfigBit = PcdGet64 (PcdHeapGuardPageType);
688 } else {
689 ConfigBit = (UINT64)-1;
690 }
691
692 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
693 TestBit = BIT63;
694 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
695 TestBit = BIT62;
696 } else if (MemoryType < EfiMaxMemoryType) {
697 TestBit = LShiftU64 (1, MemoryType);
698 } else if (MemoryType == EfiMaxMemoryType) {
699 TestBit = (UINT64)-1;
700 } else {
701 TestBit = 0;
702 }
703
704 return ((ConfigBit & TestBit) != 0);
705 }
706
707 /**
708 Check to see if the pool at the given address should be guarded or not.
709
710 @param[in] MemoryType Pool type to check.
711
712
713 @return TRUE The given type of pool should be guarded.
714 @return FALSE The given type of pool should not be guarded.
715 **/
716 BOOLEAN
717 IsPoolTypeToGuard (
718 IN EFI_MEMORY_TYPE MemoryType
719 )
720 {
721 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
722 GUARD_HEAP_TYPE_POOL);
723 }
724
725 /**
726 Check to see if the page at the given address should be guarded or not.
727
728 @param[in] MemoryType Page type to check.
729 @param[in] AllocateType Allocation type to check.
730
731 @return TRUE The given type of page should be guarded.
732 @return FALSE The given type of page should not be guarded.
733 **/
734 BOOLEAN
735 IsPageTypeToGuard (
736 IN EFI_MEMORY_TYPE MemoryType,
737 IN EFI_ALLOCATE_TYPE AllocateType
738 )
739 {
740 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
741 }
742
743 /**
744 Check to see if the heap guard is enabled for page and/or pool allocation.
745
746 @return TRUE/FALSE.
747 **/
748 BOOLEAN
749 IsHeapGuardEnabled (
750 VOID
751 )
752 {
753 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
754 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
755 }
756
757 /**
758 Set head Guard and tail Guard for the given memory range.
759
760 @param[in] Memory Base address of memory to set guard for.
761 @param[in] NumberOfPages Memory size in pages.
762
763 @return VOID
764 **/
765 VOID
766 SetGuardForMemory (
767 IN EFI_PHYSICAL_ADDRESS Memory,
768 IN UINTN NumberOfPages
769 )
770 {
771 EFI_PHYSICAL_ADDRESS GuardPage;
772
773 //
774 // Set tail Guard
775 //
776 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
777 if (!IsGuardPage (GuardPage)) {
778 SetGuardPage (GuardPage);
779 }
780
781 // Set head Guard
782 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
783 if (!IsGuardPage (GuardPage)) {
784 SetGuardPage (GuardPage);
785 }
786
787 //
788 // Mark the memory range as Guarded
789 //
790 SetGuardedMemoryBits (Memory, NumberOfPages);
791 }
792
/**
  Unset head Guard and tail Guard for the given memory range.

  Shared Guard pages (also serving an adjacent block) are kept; partial
  frees inside a guarded range convert an edge page into a new Guard.

  @param[in]  Memory         Base address of memory to unset guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  // Bit 1 is the page at Memory-1, bit 0 the page at Memory-2.
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //   --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //   --------------------
  //        +1    +0 <- End
  //
  // Bit 0 is the page right after the block, bit 1 the one after that.
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
885
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  // (BIT7 of PcdHeapGuardPropertyMask selects head-Guard placement, in
  // which case no extra alignment padding is needed.)
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE(SizeRequested, 8);
  }

  //
  // Tentatively place the allocation at the top of the free block.
  //
  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // No enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
946
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  // Bit 1 is the page at Start-1, bit 0 the page at Start-2.
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it. Extend the range to cover the Guard.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //   --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //   --------------------
  //        +1    +0 <- End
  //
  // Bit 0 is the page right after the (adjusted) block.
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it. Extend the range to cover the Guard.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
1048
1049 /**
1050 Adjust the base and number of pages to really allocate according to Guard.
1051
1052 @param[in,out] Memory Base address of free memory.
1053 @param[in,out] NumberOfPages Size of memory to allocate.
1054
1055 @return VOID.
1056 **/
1057 VOID
1058 AdjustMemoryA (
1059 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
1060 IN OUT UINTN *NumberOfPages
1061 )
1062 {
1063 //
1064 // FindFreePages() has already taken the Guard into account. It's safe to
1065 // adjust the start address and/or number of pages here, to make sure that
1066 // the Guards are also "allocated".
1067 //
1068 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
1069 // No tail Guard, add one.
1070 *NumberOfPages += 1;
1071 }
1072
1073 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
1074 // No head Guard, add one.
1075 *Memory -= EFI_PAGE_SIZE;
1076 *NumberOfPages += 1;
1077 }
1078 }
1079
1080 /**
1081 Adjust the pool head position to make sure the Guard page is adjavent to
1082 pool tail or pool head.
1083
1084 @param[in] Memory Base address of memory allocated.
1085 @param[in] NoPages Number of pages actually allocated.
1086 @param[in] Size Size of memory requested.
1087 (plus pool head/tail overhead)
1088
1089 @return Address of pool head.
1090 **/
1091 VOID *
1092 AdjustPoolHeadA (
1093 IN EFI_PHYSICAL_ADDRESS Memory,
1094 IN UINTN NoPages,
1095 IN UINTN Size
1096 )
1097 {
1098 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1099 //
1100 // Pool head is put near the head Guard
1101 //
1102 return (VOID *)(UINTN)Memory;
1103 }
1104
1105 //
1106 // Pool head is put near the tail Guard
1107 //
1108 Size = ALIGN_VALUE (Size, 8);
1109 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1110 }
1111
1112 /**
1113 Get the page base address according to pool head address.
1114
1115 @param[in] Memory Head address of pool to free.
1116
1117 @return Address of pool head.
1118 **/
1119 VOID *
1120 AdjustPoolHeadF (
1121 IN EFI_PHYSICAL_ADDRESS Memory
1122 )
1123 {
1124 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1125 //
1126 // Pool head is put near the head Guard
1127 //
1128 return (VOID *)(UINTN)Memory;
1129 }
1130
1131 //
1132 // Pool head is put near the tail Guard
1133 //
1134 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1135 }
1136
/**
  Allocate or free guarded memory.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @return The status returned by CoreConvertPages(), or EFI_SUCCESS when a
          free request is fully absorbed by the Guard-page adjustment and
          no pages remain to be converted.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    //
    // Freeing: remember the caller's original range, then shrink/extend it
    // according to shared Guard pages.
    //
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      //
      // The whole range was consumed by Guard bookkeeping; nothing left
      // to convert.
      //
      return EFI_SUCCESS;
    }
  } else {
    //
    // Allocating: extend the range to cover Guard pages not shared with
    // neighbors.
    //
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1178
/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.

  Walks the whole multi-level guard bitmap iteratively (explicit per-level
  stacks instead of recursion) and calls SetGuardPage() for every 1->0 and
  0->1 transition found in the leaf bitmaps, i.e. for every Guard page
  bordering guarded memory.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     Index;
  BOOLEAN   OnGuarding;

  //
  // Nothing to do if no memory was ever tracked.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // Start the walk at the current root level of the map.
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // This level's table is exhausted; pop back up one level.
      //
      Tables[Level] = 0;
      Level        -= 1;
    } else {

      TableEntry  = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {

        //
        // Empty subtree: no guarded memory here; any guard run ends.
        //
        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Intermediate level: descend into the child table.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;

      } else {

        //
        // Leaf bitmap: scan bit by bit. A 0->1 transition means the page
        // just before is a (head) Guard; a 1->0 transition means the
        // current page is a (tail) Guard.
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            //
            // No more set bits in this entry; skip the remaining zeros.
            //
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address   += EFI_PAGE_SIZE;
          Index     += 1;
        }
      }
    }

    //
    // Walk finished once we pop above the root level.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at this level and recompute its base address.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1288
/**
  Notify function used to set all Guard pages before CPU Arch Protocol installed.

  Guard pages require changing page attributes (present/non-present), which
  needs gCpu; pages tracked before the protocol arrived are applied here.
**/
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  )
{
  //
  // SetAllGuardPages() manipulates page attributes and therefore must not
  // run until the CPU Arch Protocol is actually available.
  //
  ASSERT (gCpu != NULL);
  SetAllGuardPages ();
}
1300
1301 /**
1302 Helper function to convert a UINT64 value in binary to a string.
1303
1304 @param[in] Value Value of a UINT64 integer.
1305 @param[out] BinString String buffer to contain the conversion result.
1306
1307 @return VOID.
1308 **/
1309 VOID
1310 Uint64ToBinString (
1311 IN UINT64 Value,
1312 OUT CHAR8 *BinString
1313 )
1314 {
1315 UINTN Index;
1316
1317 if (BinString == NULL) {
1318 return;
1319 }
1320
1321 for (Index = 64; Index > 0; --Index) {
1322 BinString[Index - 1] = '0' + (Value & 1);
1323 Value = RShiftU64 (Value, 1);
1324 }
1325 BinString[64] = '\0';
1326 }
1327
/**
  Dump the guarded memory bit map.

  Walks the multi-level bitmap the same way SetAllGuardPages() does and
  prints every leaf entry as a 64-character binary string (one page per
  bit), collapsing consecutive all-zero entries into a single "..." line.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Per-level index limit (mask)
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];     // Per-level address shift
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Per-level current index
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];     // Per-level table pointer
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];  // Per-level base address
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;                               // Run length of all-zero leaf entries
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  //
  // Nothing to dump if no memory has been tracked yet or the map level is
  // out of the supported range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Column rulers printed above the bitmap rows; aligned with the
  // "%016lx: " address prefix of each row.
  //
  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof(Indices), 0);
  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);

  //
  // Start at the top-most level actually populated by the map.
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  //
  // Iterative depth-first walk over the map tables (same traversal shape
  // as SetAllGuardPages()).
  //
  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Exhausted this table: pop to the parent level and print a separator.
      //
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {

        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          //
          // All-zero leaf entry: print the first one of a run, print "..."
          // for the second, and suppress the rest.
          //
          if (RepeatZero == 0) {
            Uint64ToBinString(TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Non-leaf entry: descend into the next-level table it points to.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;
        RepeatZero        = 0;

        continue;

      } else {
        //
        // Non-zero leaf entry: print its 64 page bits in binary.
        //
        RepeatZero = 0;
        Uint64ToBinString(TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      //
      // Popped above the starting level: the whole map has been dumped.
      //
      break;
    }

    //
    // Advance to the next entry at the current level and recompute the
    // base address it covers.
    //
    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1436