]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
663f969c0dc7c626d35d54c9379feb483189cda6
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "DxeMain.h"
16 #include "Imem.h"
17 #include "HeapGuard.h"
18
19 //
20 // Global to avoid infinite reentrance of memory allocation when updating
21 // page table attributes, which may need allocate pages for new PDE/PTE.
22 //
23 GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
24
25 //
26 // Pointer to table tracking the Guarded memory with bitmap, in which '1'
27 // is used to indicate memory guarded. '0' might be free memory or Guard
28 // page itself, depending on status of memory adjacent to it.
29 //
30 GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
31
32 //
33 // Current depth level of map table pointed by mGuardedMemoryMap.
34 // mMapLevel must be initialized at least by 1. It will be automatically
35 // updated according to the address of memory just tracked.
36 //
37 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
38
39 //
40 // Shift and mask for each level of map table
41 //
42 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
44 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
45 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
46
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  The caller is expected to pass a BitNumber already clipped to the current
  map unit (see FindGuardedMemoryMap()); no bounds check is done here.

  @param[in]  Address     Start address to set for.
  @param[in]  BitNumber   Number of bits to set.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // The range touches more than the first 64-bit entry. Split it into:
    // Msbs  - bits in the first (partial) entry, starting at StartBit;
    // Qwords- number of whole 64-bit entries in the middle;
    // Lsbs  - bits in the last (partial) entry.
    // The modulo makes Msbs 0 when StartBit == 0 so a fully covered first
    // entry is counted in Qwords instead.
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // The whole range fits inside the first entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    //
    // Set the leading partial bits: a mask of Msbs ones shifted to StartBit.
    //
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    //
    // Fill whole 64-bit entries in one shot.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    //
    // Set the trailing partial bits in the last entry.
    //
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
99
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  Mirror image of SetBits(): same Msbs/Qwords/Lsbs decomposition, but bits
  are cleared instead of set. BitNumber must already be clipped to the
  current map unit by the caller.

  @param[in]  Address     Start address to set for.
  @param[in]  BitNumber   Number of bits to set.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Range spans multiple 64-bit entries: split into leading partial bits
    // (Msbs), whole entries (Qwords) and trailing partial bits (Lsbs).
    // Modulo forces Msbs to 0 when StartBit == 0 (first entry is whole).
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // The whole range fits inside the first entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    //
    // Clear the leading partial bits.
    //
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    //
    // Zero the whole 64-bit entries in one shot.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    //
    // Clear the trailing partial bits in the last entry.
    //
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
151
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address     Start address to retrieve bits for.
  @param[in]  BitNumber   Number of bits to get.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // The range straddles two 64-bit entries: Msbs bits come from the
    // first entry, Lsbs bits from the second.
    //
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Whole-entry read. Handled specially because the mask expression
    // below would need LShiftU64(1, 64), which is invalid.
    //
    Result = *BitMap;
  } else {
    Result    = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
    if (Lsbs > 0) {
      //
      // Splice the low bits of the next entry above the Msbs bits.
      //
      BitMap  += 1;
      Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
203
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  The bitmap is a radix tree of up to GUARDED_HEAP_MAP_TABLE_DEPTH levels
  rooted at mGuardedMemoryMap. If AllocMapUnit is TRUE, missing levels and
  missing leaf map units on the path to Address are allocated on demand
  (with guarding suppressed, to avoid recursion); otherwise *BitMap is set
  to NULL when the path does not exist.

  @param[in]   Address        Start address to search the bitmap for.
  @param[in]   AllocMapUnit   Flag to indicate memory allocation for the table.
  @param[out]  BitMap         Pointer to bitmap which covers the Address.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;
  EFI_STATUS              Status;

  //
  // Adjust current map table depth according to the address to access.
  // Each extra level inserts a new root above the current one; the old
  // root becomes entry 0 of the new root (entry 0 covers address 0).
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      //
      // The last parameter FALSE makes sure the page is allocated without
      // being guarded itself (we are in the middle of map maintenance).
      //
      Status = CoreInternalAllocatePages (
                  AllocateAnyPages,
                  EfiBootServicesData,
                  EFI_SIZE_TO_PAGES (Size),
                  &MapMemory,
                  FALSE
                  );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk down from the root, allocating intermediate tables and the leaf
  // map unit as needed (only when AllocMapUnit is TRUE).
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                  AllocateAnyPages,
                  EfiBootServicesData,
                  EFI_SIZE_TO_PAGES (Size),
                  &MapMemory,
                  FALSE
                  );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    //
    // Index into the current level by the address bits for this level.
    //
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index     &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap  = GuardMap;

  return BitsToUnitEnd;
}
299
300 /**
301 Set corresponding bits in bitmap table to 1 according to given memory range.
302
303 @param[in] Address Memory address to guard from.
304 @param[in] NumberOfPages Number of pages to guard.
305
306 @return VOID.
307 **/
308 VOID
309 EFIAPI
310 SetGuardedMemoryBits (
311 IN EFI_PHYSICAL_ADDRESS Address,
312 IN UINTN NumberOfPages
313 )
314 {
315 UINT64 *BitMap;
316 UINTN Bits;
317 UINTN BitsToUnitEnd;
318
319 while (NumberOfPages > 0) {
320 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
321 ASSERT (BitMap != NULL);
322
323 if (NumberOfPages > BitsToUnitEnd) {
324 // Cross map unit
325 Bits = BitsToUnitEnd;
326 } else {
327 Bits = NumberOfPages;
328 }
329
330 SetBits (Address, Bits, BitMap);
331
332 NumberOfPages -= Bits;
333 Address += EFI_PAGES_TO_SIZE (Bits);
334 }
335 }
336
337 /**
338 Clear corresponding bits in bitmap table according to given memory range.
339
340 @param[in] Address Memory address to unset from.
341 @param[in] NumberOfPages Number of pages to unset guard.
342
343 @return VOID.
344 **/
345 VOID
346 EFIAPI
347 ClearGuardedMemoryBits (
348 IN EFI_PHYSICAL_ADDRESS Address,
349 IN UINTN NumberOfPages
350 )
351 {
352 UINT64 *BitMap;
353 UINTN Bits;
354 UINTN BitsToUnitEnd;
355
356 while (NumberOfPages > 0) {
357 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
358 ASSERT (BitMap != NULL);
359
360 if (NumberOfPages > BitsToUnitEnd) {
361 // Cross map unit
362 Bits = BitsToUnitEnd;
363 } else {
364 Bits = NumberOfPages;
365 }
366
367 ClearBits (Address, Bits, BitMap);
368
369 NumberOfPages -= Bits;
370 Address += EFI_PAGES_TO_SIZE (Bits);
371 }
372 }
373
/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  Bit 0 of the result corresponds to the page at Address; at most 64 pages
  can be queried per call. Pages with no map unit allocated read as 0.

  NOTE(review): the accumulator is UINTN, so on a 32-bit build requesting
  more than 32 pages would truncate the result — callers in this file pass
  at most 3 pages; confirm before widening any caller.

  @param[in]  Address         Memory address to retrieve from.
  @param[in]  NumberOfPages   Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINTN
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   NumberOfPages
  )
{
  UINT64    *BitMap;
  UINTN     Bits;
  UINTN     Result;
  UINTN     Shift;
  UINTN     BitsToUnitEnd;

  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift  = 0;
  while (NumberOfPages > 0) {
    //
    // FALSE: never allocate map tables on a read-only query.
    //
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    if (BitMap != NULL) {
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift         += Bits;
    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}
419
420 /**
421 Get bit value in bitmap table for the given address.
422
423 @param[in] Address The address to retrieve for.
424
425 @return 1 or 0.
426 **/
427 UINTN
428 EFIAPI
429 GetGuardMapBit (
430 IN EFI_PHYSICAL_ADDRESS Address
431 )
432 {
433 UINT64 *GuardMap;
434
435 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
436 if (GuardMap != NULL) {
437 if (RShiftU64 (*GuardMap,
438 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
439 return 1;
440 }
441 }
442
443 return 0;
444 }
445
446
447 /**
448 Check to see if the page at the given address is a Guard page or not.
449
450 @param[in] Address The address to check for.
451
452 @return TRUE The page at Address is a Guard page.
453 @return FALSE The page at Address is not a Guard page.
454 **/
455 BOOLEAN
456 EFIAPI
457 IsGuardPage (
458 IN EFI_PHYSICAL_ADDRESS Address
459 )
460 {
461 UINTN BitMap;
462
463 //
464 // There must be at least one guarded page before and/or after given
465 // address if it's a Guard page. The bitmap pattern should be one of
466 // 001, 100 and 101
467 //
468 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
469 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
470 }
471
472
473 /**
474 Check to see if the page at the given address is guarded or not.
475
476 @param[in] Address The address to check for.
477
478 @return TRUE The page at Address is guarded.
479 @return FALSE The page at Address is not guarded.
480 **/
481 BOOLEAN
482 EFIAPI
483 IsMemoryGuarded (
484 IN EFI_PHYSICAL_ADDRESS Address
485 )
486 {
487 return (GetGuardMapBit (Address) == 1);
488 }
489
490 /**
491 Set the page at the given address to be a Guard page.
492
493 This is done by changing the page table attribute to be NOT PRSENT.
494
495 @param[in] BaseAddress Page address to Guard at
496
497 @return VOID
498 **/
499 VOID
500 EFIAPI
501 SetGuardPage (
502 IN EFI_PHYSICAL_ADDRESS BaseAddress
503 )
504 {
505 EFI_STATUS Status;
506
507 if (gCpu == NULL) {
508 return;
509 }
510
511 //
512 // Set flag to make sure allocating memory without GUARD for page table
513 // operation; otherwise infinite loops could be caused.
514 //
515 mOnGuarding = TRUE;
516 //
517 // Note: This might overwrite other attributes needed by other features,
518 // such as NX memory protection.
519 //
520 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
521 ASSERT_EFI_ERROR (Status);
522 mOnGuarding = FALSE;
523 }
524
525 /**
526 Unset the Guard page at the given address to the normal memory.
527
528 This is done by changing the page table attribute to be PRSENT.
529
530 @param[in] BaseAddress Page address to Guard at.
531
532 @return VOID.
533 **/
534 VOID
535 EFIAPI
536 UnsetGuardPage (
537 IN EFI_PHYSICAL_ADDRESS BaseAddress
538 )
539 {
540 UINT64 Attributes;
541 EFI_STATUS Status;
542
543 if (gCpu == NULL) {
544 return;
545 }
546
547 //
548 // Once the Guard page is unset, it will be freed back to memory pool. NX
549 // memory protection must be restored for this page if NX is enabled for free
550 // memory.
551 //
552 Attributes = 0;
553 if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
554 Attributes |= EFI_MEMORY_XP;
555 }
556
557 //
558 // Set flag to make sure allocating memory without GUARD for page table
559 // operation; otherwise infinite loops could be caused.
560 //
561 mOnGuarding = TRUE;
562 //
563 // Note: This might overwrite other attributes needed by other features,
564 // such as memory protection (NX). Please make sure they are not enabled
565 // at the same time.
566 //
567 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
568 ASSERT_EFI_ERROR (Status);
569 mOnGuarding = FALSE;
570 }
571
572 /**
573 Check to see if the memory at the given address should be guarded or not.
574
575 @param[in] MemoryType Memory type to check.
576 @param[in] AllocateType Allocation type to check.
577 @param[in] PageOrPool Indicate a page allocation or pool allocation.
578
579
580 @return TRUE The given type of memory should be guarded.
581 @return FALSE The given type of memory should not be guarded.
582 **/
583 BOOLEAN
584 IsMemoryTypeToGuard (
585 IN EFI_MEMORY_TYPE MemoryType,
586 IN EFI_ALLOCATE_TYPE AllocateType,
587 IN UINT8 PageOrPool
588 )
589 {
590 UINT64 TestBit;
591 UINT64 ConfigBit;
592
593 if (AllocateType == AllocateAddress) {
594 return FALSE;
595 }
596
597 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
598 return FALSE;
599 }
600
601 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
602 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
603 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
604 ConfigBit = PcdGet64 (PcdHeapGuardPageType);
605 } else {
606 ConfigBit = (UINT64)-1;
607 }
608
609 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
610 TestBit = BIT63;
611 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
612 TestBit = BIT62;
613 } else if (MemoryType < EfiMaxMemoryType) {
614 TestBit = LShiftU64 (1, MemoryType);
615 } else if (MemoryType == EfiMaxMemoryType) {
616 TestBit = (UINT64)-1;
617 } else {
618 TestBit = 0;
619 }
620
621 return ((ConfigBit & TestBit) != 0);
622 }
623
624 /**
625 Check to see if the pool at the given address should be guarded or not.
626
627 @param[in] MemoryType Pool type to check.
628
629
630 @return TRUE The given type of pool should be guarded.
631 @return FALSE The given type of pool should not be guarded.
632 **/
633 BOOLEAN
634 IsPoolTypeToGuard (
635 IN EFI_MEMORY_TYPE MemoryType
636 )
637 {
638 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
639 GUARD_HEAP_TYPE_POOL);
640 }
641
642 /**
643 Check to see if the page at the given address should be guarded or not.
644
645 @param[in] MemoryType Page type to check.
646 @param[in] AllocateType Allocation type to check.
647
648 @return TRUE The given type of page should be guarded.
649 @return FALSE The given type of page should not be guarded.
650 **/
651 BOOLEAN
652 IsPageTypeToGuard (
653 IN EFI_MEMORY_TYPE MemoryType,
654 IN EFI_ALLOCATE_TYPE AllocateType
655 )
656 {
657 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
658 }
659
660 /**
661 Check to see if the heap guard is enabled for page and/or pool allocation.
662
663 @return TRUE/FALSE.
664 **/
665 BOOLEAN
666 IsHeapGuardEnabled (
667 VOID
668 )
669 {
670 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
671 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
672 }
673
674 /**
675 Set head Guard and tail Guard for the given memory range.
676
677 @param[in] Memory Base address of memory to set guard for.
678 @param[in] NumberOfPages Memory size in pages.
679
680 @return VOID
681 **/
682 VOID
683 SetGuardForMemory (
684 IN EFI_PHYSICAL_ADDRESS Memory,
685 IN UINTN NumberOfPages
686 )
687 {
688 EFI_PHYSICAL_ADDRESS GuardPage;
689
690 //
691 // Set tail Guard
692 //
693 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
694 if (!IsGuardPage (GuardPage)) {
695 SetGuardPage (GuardPage);
696 }
697
698 // Set head Guard
699 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
700 if (!IsGuardPage (GuardPage)) {
701 SetGuardPage (GuardPage);
702 }
703
704 //
705 // Mark the memory range as Guarded
706 //
707 SetGuardedMemoryBits (Memory, NumberOfPages);
708 }
709
/**
  Unset head Guard and tail Guard for the given memory range.

  Shared Guard pages (also serving the adjacent block) are kept; Guard
  pages exclusive to this range are unset. In partial-free situations the
  edge page of the freed range is converted into a new Guard page for the
  part that stays allocated. Finally the bitmap marks are cleared.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //                -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //                -------------------
  //      Start -> -1    -2
  //
  // Bit 1 is the page two before Memory; bit 0 is the page right before
  // (the Guard page candidate).
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  // Bit 0 is the page right after the range (the Guard page candidate);
  // bit 1 is the page after that.
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
802
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  The allocation is placed as high as possible inside the free block so
  that an existing tail Guard can be shared.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  // (BIT7 of PcdHeapGuardPropertyMask means pool head sits at the page
  // start, i.e. near the head Guard, so no extra alignment is needed.)
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE(SizeRequested, 8);
  }

  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // No enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
863
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //                -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //                -------------------
  //      Start -> -1    -2
  //
  // Bit 1 covers the page two before Start; bit 0 the page right before
  // (the Guard page candidate).
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  // Note: uses the Start/PagesToFree already adjusted by the head logic
  // above, so the tested address is the page after the final range.
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
965
966 /**
967 Adjust the base and number of pages to really allocate according to Guard.
968
969 @param[in,out] Memory Base address of free memory.
970 @param[in,out] NumberOfPages Size of memory to allocate.
971
972 @return VOID.
973 **/
974 VOID
975 AdjustMemoryA (
976 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
977 IN OUT UINTN *NumberOfPages
978 )
979 {
980 //
981 // FindFreePages() has already taken the Guard into account. It's safe to
982 // adjust the start address and/or number of pages here, to make sure that
983 // the Guards are also "allocated".
984 //
985 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
986 // No tail Guard, add one.
987 *NumberOfPages += 1;
988 }
989
990 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
991 // No head Guard, add one.
992 *Memory -= EFI_PAGE_SIZE;
993 *NumberOfPages += 1;
994 }
995 }
996
997 /**
998 Adjust the pool head position to make sure the Guard page is adjavent to
999 pool tail or pool head.
1000
1001 @param[in] Memory Base address of memory allocated.
1002 @param[in] NoPages Number of pages actually allocated.
1003 @param[in] Size Size of memory requested.
1004 (plus pool head/tail overhead)
1005
1006 @return Address of pool head.
1007 **/
1008 VOID *
1009 AdjustPoolHeadA (
1010 IN EFI_PHYSICAL_ADDRESS Memory,
1011 IN UINTN NoPages,
1012 IN UINTN Size
1013 )
1014 {
1015 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1016 //
1017 // Pool head is put near the head Guard
1018 //
1019 return (VOID *)(UINTN)Memory;
1020 }
1021
1022 //
1023 // Pool head is put near the tail Guard
1024 //
1025 Size = ALIGN_VALUE (Size, 8);
1026 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1027 }
1028
1029 /**
1030 Get the page base address according to pool head address.
1031
1032 @param[in] Memory Head address of pool to free.
1033
1034 @return Address of pool head.
1035 **/
1036 VOID *
1037 AdjustPoolHeadF (
1038 IN EFI_PHYSICAL_ADDRESS Memory
1039 )
1040 {
1041 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1042 //
1043 // Pool head is put near the head Guard
1044 //
1045 return (VOID *)(UINTN)Memory;
1046 }
1047
1048 //
1049 // Pool head is put near the tail Guard
1050 //
1051 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1052 }
1053
/**
  Allocate or free guarded memory.

  For a free (NewType == EfiConventionalMemory) the range is shrunk/expanded
  by AdjustMemoryF() and the Guard pages are unset first; for an allocation
  the range is expanded by AdjustMemoryA() so the Guards are allocated too.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @retval EFI_SUCCESS         Memory was converted (or nothing was left to
                              convert after Guard adjustment).
  @return Status from CoreConvertPages() otherwise.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    // Keep the original range: UnsetGuardForMemory() must see it before
    // AdjustMemoryF() modifies Start/NumberOfPages.
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      return EFI_SUCCESS;
    }
  } else {
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1095
/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.

  Walks the whole guarded-memory bitmap tree iteratively (Tables/Addresses/
  Indices act as an explicit traversal stack, one slot per level) and calls
  SetGuardPage() for every page that sits on a 1->0 or 0->1 transition in
  the bitmap, i.e. every Guard page bordering guarded memory.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     Index;
  BOOLEAN   OnGuarding;

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  // Start at the shallowest level actually in use.
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      // Current table exhausted; pop one level up.
      Tables[Level] = 0;
      Level        -= 1;
    } else {

      TableEntry  = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {

        // Empty subtree: no guarded pages here, reset run state.
        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        // Non-leaf entry: descend into the next-level table.
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;

      } else {

        //
        // Leaf bitmap entry: scan bit by bit. A 0->1 transition means the
        // page before is the head Guard; a 1->0 transition means the
        // current page is the tail Guard.
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding  = TRUE;
          } else {
            if (OnGuarding) {
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding  = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            // Remaining bits are all zero; skip to the next entry.
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address   += EFI_PAGE_SIZE;
          Index     += 1;
        }
      }
    }

    // Popped above the shallowest in-use level: traversal complete.
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    // Advance to the next entry at this level and recompute its address.
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1205
1206 /**
1207 Notify function used to set all Guard pages before CPU Arch Protocol installed.
1208 **/
1209 VOID
1210 HeapGuardCpuArchProtocolNotify (
1211 VOID
1212 )
1213 {
1214 ASSERT (gCpu != NULL);
1215 SetAllGuardPages ();
1216 }
1217
1218 /**
1219 Helper function to convert a UINT64 value in binary to a string.
1220
1221 @param[in] Value Value of a UINT64 integer.
1222 @param[out] BinString String buffer to contain the conversion result.
1223
1224 @return VOID.
1225 **/
1226 VOID
1227 Uint64ToBinString (
1228 IN UINT64 Value,
1229 OUT CHAR8 *BinString
1230 )
1231 {
1232 UINTN Index;
1233
1234 if (BinString == NULL) {
1235 return;
1236 }
1237
1238 for (Index = 64; Index > 0; --Index) {
1239 BinString[Index - 1] = '0' + (Value & 1);
1240 Value = RShiftU64 (Value, 1);
1241 }
1242 BinString[64] = '\0';
1243 }
1244
/**
  Dump the guarded memory bit map.

  Iteratively walks the bitmap tree (Tables/Addresses/Indices form an
  explicit traversal stack, one slot per level) and prints each leaf
  64-bit entry in binary next to its base address. Consecutive all-zero
  entries are collapsed: the first is printed, the second prints "...",
  further ones are suppressed (tracked via RepeatZero).
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  // Column rulers printed above the 64-bit binary rows.
  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof(Indices), 0);
  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);

  // Start at the shallowest level actually in use.
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {

      // Current table exhausted; pop one level up.
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry  = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {

        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          // Collapse runs of zero entries: print the first, "..." for
          // the second, nothing for the rest.
          if (RepeatZero == 0) {
            Uint64ToBinString(TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        // Non-leaf entry: descend into the next-level table.
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;
        RepeatZero        = 0;

        continue;

      } else {

        // Non-zero leaf entry: print it and reset the zero-run counter.
        RepeatZero = 0;
        Uint64ToBinString(TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    // Popped above the shallowest in-use level: traversal complete.
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    // Advance to the next entry at this level and recompute its address.
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1353