]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
MdeModulePkg/Core: fix feature conflict between NX and NULL detection
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "DxeMain.h"
16 #include "Imem.h"
17 #include "HeapGuard.h"
18
//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocate pages for new PDE/PTE.
// Checked by the allocator so that pages allocated while this is TRUE are
// never themselves guarded.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to table tracking the Guarded memory with bitmap, in which '1'
// is used to indicate memory guarded. '0' might be free memory or Guard
// page itself, depending on status of memory adjacent to it.
// Acts as the single-entry root of a multi-level table tree walked by
// FindGuardedMemoryMap().
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized at least by 1. It will be automatically
// updated according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table, indexed from the top level
// (index 0) down to the bitmap level.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
46
47 /**
48 Set corresponding bits in bitmap table to 1 according to the address.
49
50 @param[in] Address Start address to set for.
51 @param[in] BitNumber Number of bits to set.
52 @param[in] BitMap Pointer to bitmap which covers the Address.
53
54 @return VOID.
55 **/
56 STATIC
57 VOID
58 SetBits (
59 IN EFI_PHYSICAL_ADDRESS Address,
60 IN UINTN BitNumber,
61 IN UINT64 *BitMap
62 )
63 {
64 UINTN Lsbs;
65 UINTN Qwords;
66 UINTN Msbs;
67 UINTN StartBit;
68 UINTN EndBit;
69
70 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
71 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
72
73 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
74 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
75 GUARDED_HEAP_MAP_ENTRY_BITS;
76 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
77 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
78 } else {
79 Msbs = BitNumber;
80 Lsbs = 0;
81 Qwords = 0;
82 }
83
84 if (Msbs > 0) {
85 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
86 BitMap += 1;
87 }
88
89 if (Qwords > 0) {
90 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
91 (UINT64)-1);
92 BitMap += Qwords;
93 }
94
95 if (Lsbs > 0) {
96 *BitMap |= (LShiftU64 (1, Lsbs) - 1);
97 }
98 }
99
100 /**
101 Set corresponding bits in bitmap table to 0 according to the address.
102
103 @param[in] Address Start address to set for.
104 @param[in] BitNumber Number of bits to set.
105 @param[in] BitMap Pointer to bitmap which covers the Address.
106
107 @return VOID.
108 **/
109 STATIC
110 VOID
111 ClearBits (
112 IN EFI_PHYSICAL_ADDRESS Address,
113 IN UINTN BitNumber,
114 IN UINT64 *BitMap
115 )
116 {
117 UINTN Lsbs;
118 UINTN Qwords;
119 UINTN Msbs;
120 UINTN StartBit;
121 UINTN EndBit;
122
123 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
124 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
125
126 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
127 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
128 GUARDED_HEAP_MAP_ENTRY_BITS;
129 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
130 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
131 } else {
132 Msbs = BitNumber;
133 Lsbs = 0;
134 Qwords = 0;
135 }
136
137 if (Msbs > 0) {
138 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
139 BitMap += 1;
140 }
141
142 if (Qwords > 0) {
143 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
144 BitMap += Qwords;
145 }
146
147 if (Lsbs > 0) {
148 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
149 }
150 }
151
152 /**
153 Get corresponding bits in bitmap table according to the address.
154
155 The value of bit 0 corresponds to the status of memory at given Address.
156 No more than 64 bits can be retrieved in one call.
157
158 @param[in] Address Start address to retrieve bits for.
159 @param[in] BitNumber Number of bits to get.
160 @param[in] BitMap Pointer to bitmap which covers the Address.
161
162 @return An integer containing the bits information.
163 **/
164 STATIC
165 UINT64
166 GetBits (
167 IN EFI_PHYSICAL_ADDRESS Address,
168 IN UINTN BitNumber,
169 IN UINT64 *BitMap
170 )
171 {
172 UINTN StartBit;
173 UINTN EndBit;
174 UINTN Lsbs;
175 UINTN Msbs;
176 UINT64 Result;
177
178 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
179
180 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
181 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
182
183 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
184 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
185 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
186 } else {
187 Msbs = BitNumber;
188 Lsbs = 0;
189 }
190
191 Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
192 if (Lsbs > 0) {
193 BitMap += 1;
194 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
195 }
196
197 return Result;
198 }
199
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  The map is a radix tree rooted at mGuardedMemoryMap. The tree is deepened
  on demand when Address is too large for the current depth, and missing
  intermediate tables/bitmap units are allocated when AllocMapUnit is TRUE.

  @param[in]  Address       Start address to search the bitmap for.
  @param[in]  AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out] BitMap        Pointer to bitmap which covers the Address; NULL
                            if no table covers it and AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;
  EFI_STATUS              Status;

  //
  // Adjust current map table depth according to the address to access
  //
  while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH
         &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      //
      // Grow the tree upwards: allocate a new root table whose entry 0
      // points at the existing (shallower) tree. Allocation deliberately
      // bypasses guarding (last argument FALSE) to avoid reentrance.
      //
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk down from the root; mGuardedMemoryMap itself serves as a
  // one-entry top-level table.
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        // Lookup-only mode: report "not covered" via a NULL bitmap.
        GuardMap = NULL;
        break;
      }

      //
      // Allocate and zero the missing next-level table (or bitmap unit).
      //
      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    // Select this level's entry from the relevant bits of Address.
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index     &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}
295
296 /**
297 Set corresponding bits in bitmap table to 1 according to given memory range.
298
299 @param[in] Address Memory address to guard from.
300 @param[in] NumberOfPages Number of pages to guard.
301
302 @return VOID.
303 **/
304 VOID
305 EFIAPI
306 SetGuardedMemoryBits (
307 IN EFI_PHYSICAL_ADDRESS Address,
308 IN UINTN NumberOfPages
309 )
310 {
311 UINT64 *BitMap;
312 UINTN Bits;
313 UINTN BitsToUnitEnd;
314
315 while (NumberOfPages > 0) {
316 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
317 ASSERT (BitMap != NULL);
318
319 if (NumberOfPages > BitsToUnitEnd) {
320 // Cross map unit
321 Bits = BitsToUnitEnd;
322 } else {
323 Bits = NumberOfPages;
324 }
325
326 SetBits (Address, Bits, BitMap);
327
328 NumberOfPages -= Bits;
329 Address += EFI_PAGES_TO_SIZE (Bits);
330 }
331 }
332
333 /**
334 Clear corresponding bits in bitmap table according to given memory range.
335
336 @param[in] Address Memory address to unset from.
337 @param[in] NumberOfPages Number of pages to unset guard.
338
339 @return VOID.
340 **/
341 VOID
342 EFIAPI
343 ClearGuardedMemoryBits (
344 IN EFI_PHYSICAL_ADDRESS Address,
345 IN UINTN NumberOfPages
346 )
347 {
348 UINT64 *BitMap;
349 UINTN Bits;
350 UINTN BitsToUnitEnd;
351
352 while (NumberOfPages > 0) {
353 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
354 ASSERT (BitMap != NULL);
355
356 if (NumberOfPages > BitsToUnitEnd) {
357 // Cross map unit
358 Bits = BitsToUnitEnd;
359 } else {
360 Bits = NumberOfPages;
361 }
362
363 ClearBits (Address, Bits, BitMap);
364
365 NumberOfPages -= Bits;
366 Address += EFI_PAGES_TO_SIZE (Bits);
367 }
368 }
369
370 /**
371 Retrieve corresponding bits in bitmap table according to given memory range.
372
373 @param[in] Address Memory address to retrieve from.
374 @param[in] NumberOfPages Number of pages to retrieve.
375
376 @return An integer containing the guarded memory bitmap.
377 **/
378 UINTN
379 GetGuardedMemoryBits (
380 IN EFI_PHYSICAL_ADDRESS Address,
381 IN UINTN NumberOfPages
382 )
383 {
384 UINT64 *BitMap;
385 UINTN Bits;
386 UINTN Result;
387 UINTN Shift;
388 UINTN BitsToUnitEnd;
389
390 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
391
392 Result = 0;
393 Shift = 0;
394 while (NumberOfPages > 0) {
395 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
396
397 if (NumberOfPages > BitsToUnitEnd) {
398 // Cross map unit
399 Bits = BitsToUnitEnd;
400 } else {
401 Bits = NumberOfPages;
402 }
403
404 if (BitMap != NULL) {
405 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
406 }
407
408 Shift += Bits;
409 NumberOfPages -= Bits;
410 Address += EFI_PAGES_TO_SIZE (Bits);
411 }
412
413 return Result;
414 }
415
416 /**
417 Get bit value in bitmap table for the given address.
418
419 @param[in] Address The address to retrieve for.
420
421 @return 1 or 0.
422 **/
423 UINTN
424 EFIAPI
425 GetGuardMapBit (
426 IN EFI_PHYSICAL_ADDRESS Address
427 )
428 {
429 UINT64 *GuardMap;
430
431 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
432 if (GuardMap != NULL) {
433 if (RShiftU64 (*GuardMap,
434 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
435 return 1;
436 }
437 }
438
439 return 0;
440 }
441
442 /**
443 Set the bit in bitmap table for the given address.
444
445 @param[in] Address The address to set for.
446
447 @return VOID.
448 **/
449 VOID
450 EFIAPI
451 SetGuardMapBit (
452 IN EFI_PHYSICAL_ADDRESS Address
453 )
454 {
455 UINT64 *GuardMap;
456 UINT64 BitMask;
457
458 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
459 if (GuardMap != NULL) {
460 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
461 *GuardMap |= BitMask;
462 }
463 }
464
465 /**
466 Clear the bit in bitmap table for the given address.
467
468 @param[in] Address The address to clear for.
469
470 @return VOID.
471 **/
472 VOID
473 EFIAPI
474 ClearGuardMapBit (
475 IN EFI_PHYSICAL_ADDRESS Address
476 )
477 {
478 UINT64 *GuardMap;
479 UINT64 BitMask;
480
481 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
482 if (GuardMap != NULL) {
483 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
484 *GuardMap &= ~BitMask;
485 }
486 }
487
488 /**
489 Check to see if the page at the given address is a Guard page or not.
490
491 @param[in] Address The address to check for.
492
493 @return TRUE The page at Address is a Guard page.
494 @return FALSE The page at Address is not a Guard page.
495 **/
496 BOOLEAN
497 EFIAPI
498 IsGuardPage (
499 IN EFI_PHYSICAL_ADDRESS Address
500 )
501 {
502 UINTN BitMap;
503
504 //
505 // There must be at least one guarded page before and/or after given
506 // address if it's a Guard page. The bitmap pattern should be one of
507 // 001, 100 and 101
508 //
509 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
510 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
511 }
512
513 /**
514 Check to see if the page at the given address is a head Guard page or not.
515
516 @param[in] Address The address to check for
517
518 @return TRUE The page at Address is a head Guard page
519 @return FALSE The page at Address is not a head Guard page
520 **/
521 BOOLEAN
522 EFIAPI
523 IsHeadGuard (
524 IN EFI_PHYSICAL_ADDRESS Address
525 )
526 {
527 return (GetGuardedMemoryBits (Address, 2) == BIT1);
528 }
529
530 /**
531 Check to see if the page at the given address is a tail Guard page or not.
532
533 @param[in] Address The address to check for.
534
535 @return TRUE The page at Address is a tail Guard page.
536 @return FALSE The page at Address is not a tail Guard page.
537 **/
538 BOOLEAN
539 EFIAPI
540 IsTailGuard (
541 IN EFI_PHYSICAL_ADDRESS Address
542 )
543 {
544 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
545 }
546
547 /**
548 Check to see if the page at the given address is guarded or not.
549
550 @param[in] Address The address to check for.
551
552 @return TRUE The page at Address is guarded.
553 @return FALSE The page at Address is not guarded.
554 **/
555 BOOLEAN
556 EFIAPI
557 IsMemoryGuarded (
558 IN EFI_PHYSICAL_ADDRESS Address
559 )
560 {
561 return (GetGuardMapBit (Address) == 1);
562 }
563
/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT
  (EFI_MEMORY_RP), so that any access to the page faults immediately.
  Callers are expected to have verified gCpu != NULL (see
  IsMemoryTypeToGuard()).

  @param[in]  BaseAddress    Page address to Guard at

  @return VOID
**/
VOID
EFIAPI
SetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as NX memory protection.
  //
  gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
  mOnGuarding = FALSE;
}
591
592 /**
593 Unset the Guard page at the given address to the normal memory.
594
595 This is done by changing the page table attribute to be PRSENT.
596
597 @param[in] BaseAddress Page address to Guard at.
598
599 @return VOID.
600 **/
601 VOID
602 EFIAPI
603 UnsetGuardPage (
604 IN EFI_PHYSICAL_ADDRESS BaseAddress
605 )
606 {
607 UINT64 Attributes;
608
609 //
610 // Once the Guard page is unset, it will be freed back to memory pool. NX
611 // memory protection must be restored for this page if NX is enabled for free
612 // memory.
613 //
614 Attributes = 0;
615 if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
616 Attributes |= EFI_MEMORY_XP;
617 }
618
619 //
620 // Set flag to make sure allocating memory without GUARD for page table
621 // operation; otherwise infinite loops could be caused.
622 //
623 mOnGuarding = TRUE;
624 //
625 // Note: This might overwrite other attributes needed by other features,
626 // such as memory protection (NX). Please make sure they are not enabled
627 // at the same time.
628 //
629 gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
630 mOnGuarding = FALSE;
631 }
632
633 /**
634 Check to see if the memory at the given address should be guarded or not.
635
636 @param[in] MemoryType Memory type to check.
637 @param[in] AllocateType Allocation type to check.
638 @param[in] PageOrPool Indicate a page allocation or pool allocation.
639
640
641 @return TRUE The given type of memory should be guarded.
642 @return FALSE The given type of memory should not be guarded.
643 **/
644 BOOLEAN
645 IsMemoryTypeToGuard (
646 IN EFI_MEMORY_TYPE MemoryType,
647 IN EFI_ALLOCATE_TYPE AllocateType,
648 IN UINT8 PageOrPool
649 )
650 {
651 UINT64 TestBit;
652 UINT64 ConfigBit;
653 BOOLEAN InSmm;
654
655 if (gCpu == NULL || AllocateType == AllocateAddress) {
656 return FALSE;
657 }
658
659 InSmm = FALSE;
660 if (gSmmBase2 != NULL) {
661 gSmmBase2->InSmm (gSmmBase2, &InSmm);
662 }
663
664 if (InSmm) {
665 return FALSE;
666 }
667
668 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
669 return FALSE;
670 }
671
672 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
673 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
674 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
675 ConfigBit = PcdGet64 (PcdHeapGuardPageType);
676 } else {
677 ConfigBit = (UINT64)-1;
678 }
679
680 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
681 TestBit = BIT63;
682 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
683 TestBit = BIT62;
684 } else if (MemoryType < EfiMaxMemoryType) {
685 TestBit = LShiftU64 (1, MemoryType);
686 } else if (MemoryType == EfiMaxMemoryType) {
687 TestBit = (UINT64)-1;
688 } else {
689 TestBit = 0;
690 }
691
692 return ((ConfigBit & TestBit) != 0);
693 }
694
695 /**
696 Check to see if the pool at the given address should be guarded or not.
697
698 @param[in] MemoryType Pool type to check.
699
700
701 @return TRUE The given type of pool should be guarded.
702 @return FALSE The given type of pool should not be guarded.
703 **/
704 BOOLEAN
705 IsPoolTypeToGuard (
706 IN EFI_MEMORY_TYPE MemoryType
707 )
708 {
709 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
710 GUARD_HEAP_TYPE_POOL);
711 }
712
713 /**
714 Check to see if the page at the given address should be guarded or not.
715
716 @param[in] MemoryType Page type to check.
717 @param[in] AllocateType Allocation type to check.
718
719 @return TRUE The given type of page should be guarded.
720 @return FALSE The given type of page should not be guarded.
721 **/
722 BOOLEAN
723 IsPageTypeToGuard (
724 IN EFI_MEMORY_TYPE MemoryType,
725 IN EFI_ALLOCATE_TYPE AllocateType
726 )
727 {
728 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
729 }
730
731 /**
732 Set head Guard and tail Guard for the given memory range.
733
734 @param[in] Memory Base address of memory to set guard for.
735 @param[in] NumberOfPages Memory size in pages.
736
737 @return VOID
738 **/
739 VOID
740 SetGuardForMemory (
741 IN EFI_PHYSICAL_ADDRESS Memory,
742 IN UINTN NumberOfPages
743 )
744 {
745 EFI_PHYSICAL_ADDRESS GuardPage;
746
747 //
748 // Set tail Guard
749 //
750 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
751 if (!IsGuardPage (GuardPage)) {
752 SetGuardPage (GuardPage);
753 }
754
755 // Set head Guard
756 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
757 if (!IsGuardPage (GuardPage)) {
758 SetGuardPage (GuardPage);
759 }
760
761 //
762 // Mark the memory range as Guarded
763 //
764 SetGuardedMemoryBits (Memory, NumberOfPages);
765 }
766
/**
  Unset head Guard and tail Guard for the given memory range.

  Shared Guard pages (also serving an adjacent allocation) are kept; in
  partial-free situations a boundary page of the freed range is turned into
  a new Guard instead.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  // Read the guard bits of the two pages preceding the block.
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  // Read the guard bits of the two pages following the block.
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
859
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  // (BIT7 clear means the pool head is placed near the tail Guard.)
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE(SizeRequested, 8);
  }

  // Tentative start: place the allocation at the top of the free block.
  Target = Start + Size - SizeRequested;

  //
  // At least one more page needed for Guard page.
  //
  if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // No enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
923
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  // Read the guard bits of the two pages preceding the block.
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  // Read the guard bits of the two pages following the block.
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
1025
1026 /**
1027 Adjust the base and number of pages to really allocate according to Guard.
1028
1029 @param[in,out] Memory Base address of free memory.
1030 @param[in,out] NumberOfPages Size of memory to allocate.
1031
1032 @return VOID.
1033 **/
1034 VOID
1035 AdjustMemoryA (
1036 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
1037 IN OUT UINTN *NumberOfPages
1038 )
1039 {
1040 //
1041 // FindFreePages() has already taken the Guard into account. It's safe to
1042 // adjust the start address and/or number of pages here, to make sure that
1043 // the Guards are also "allocated".
1044 //
1045 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
1046 // No tail Guard, add one.
1047 *NumberOfPages += 1;
1048 }
1049
1050 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
1051 // No head Guard, add one.
1052 *Memory -= EFI_PAGE_SIZE;
1053 *NumberOfPages += 1;
1054 }
1055 }
1056
1057 /**
1058 Adjust the pool head position to make sure the Guard page is adjavent to
1059 pool tail or pool head.
1060
1061 @param[in] Memory Base address of memory allocated.
1062 @param[in] NoPages Number of pages actually allocated.
1063 @param[in] Size Size of memory requested.
1064 (plus pool head/tail overhead)
1065
1066 @return Address of pool head.
1067 **/
1068 VOID *
1069 AdjustPoolHeadA (
1070 IN EFI_PHYSICAL_ADDRESS Memory,
1071 IN UINTN NoPages,
1072 IN UINTN Size
1073 )
1074 {
1075 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1076 //
1077 // Pool head is put near the head Guard
1078 //
1079 return (VOID *)(UINTN)Memory;
1080 }
1081
1082 //
1083 // Pool head is put near the tail Guard
1084 //
1085 Size = ALIGN_VALUE (Size, 8);
1086 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1087 }
1088
1089 /**
1090 Get the page base address according to pool head address.
1091
1092 @param[in] Memory Head address of pool to free.
1093
1094 @return Address of pool head.
1095 **/
1096 VOID *
1097 AdjustPoolHeadF (
1098 IN EFI_PHYSICAL_ADDRESS Memory
1099 )
1100 {
1101 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1102 //
1103 // Pool head is put near the head Guard
1104 //
1105 return (VOID *)(UINTN)Memory;
1106 }
1107
1108 //
1109 // Pool head is put near the tail Guard
1110 //
1111 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1112 }
1113
/**
  Allocate or free guarded memory.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @retval EFI_SUCCESS   Freed range was fully absorbed by shared Guard
                        pages, so no conversion was needed.
  @return Status of CoreConvertPages() otherwise.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64    OldStart;
  UINTN     OldPages;

  if (NewType == EfiConventionalMemory) {
    // Freeing: remember the caller's range, then shrink/grow it so shared
    // Guard pages are kept and boundary pages become new Guards.
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      return EFI_SUCCESS;
    }
  } else {
    // Allocating: widen the range so the Guard pages are allocated too.
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1155
1156 /**
1157 Helper function to convert a UINT64 value in binary to a string.
1158
1159 @param[in] Value Value of a UINT64 integer.
1160 @param[out] BinString String buffer to contain the conversion result.
1161
1162 @return VOID.
1163 **/
1164 VOID
1165 Uint64ToBinString (
1166 IN UINT64 Value,
1167 OUT CHAR8 *BinString
1168 )
1169 {
1170 UINTN Index;
1171
1172 if (BinString == NULL) {
1173 return;
1174 }
1175
1176 for (Index = 64; Index > 0; --Index) {
1177 BinString[Index - 1] = '0' + (Value & 1);
1178 Value = RShiftU64 (Value, 1);
1179 }
1180 BinString[64] = '\0';
1181 }
1182
/**
  Dump the guarded memory bit map.

  Walks the multi-level map tree iteratively (no recursion), printing one
  line of 64 bits per bitmap entry; runs of all-zero entries at the leaf
  level are collapsed into a single "..." line.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  // Per-level iteration state for the explicit (stack-free) tree walk.
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  // Nothing to dump if the map was never created or the depth is invalid.
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof(Indices), 0);
  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);

  // Start the walk at the tree's current root level.
  Level           = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level]   = mGuardedMemoryMap;
  Address         = 0;
  RepeatZero      = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {

      // Current table exhausted; pop back up one level.
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry  = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {

        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          // Leaf level: print the first zero entry, a "..." for the
          // second, and suppress the rest of the run.
          if (RepeatZero == 0) {
            Uint64ToBinString(TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        // Non-leaf entry: descend into the child table.
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;
        RepeatZero        = 0;

        continue;

      } else {

        // Non-zero leaf entry: print its 64 page bits.
        RepeatZero = 0;
        Uint64ToBinString(TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    // Stop once the walk retreats above the root level.
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    // Advance to the next entry at this level and derive its base address.
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1291