/** @file
  UEFI Heap Guard functions.

  Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "HeapGuard.h"

//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may require allocating pages for new PDE/PTE.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN  mOnGuarding = FALSE;

//
// Pointer to the table tracking guarded memory with a bitmap, in which '1'
// indicates guarded memory. '0' might be free memory or a Guard page
// itself, depending on the status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64  mGuardedMemoryMap = 0;

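//
// Illustrative example (not part of the original code): guarding a 2-page
// allocation starting at page N marks only the allocated pages in the bitmap,
// while the head and tail Guard pages around it stay '0':
//
//   page:   N-1     N      N+1     N+2
//   bit :    0      1       1       0
//           head   <allocated>     tail
//           Guard                  Guard
//
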
//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized to at least 1. It will be adjusted
// automatically according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN  mMapLevel = 1;

//
// Shift and mask for each level of map table
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN  mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
  = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN  mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
  = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;

//
// SMM memory attribute protocol
//
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  *mSmmMemoryAttribute = NULL;

/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address     Start address to set for.
  @param[in]  BitNumber   Number of bits to set.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN  Lsbs;
  UINTN  Qwords;
  UINTN  Msbs;
  UINTN  StartBit;
  UINTN  EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
           GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs   = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs   = BitNumber;
    Lsbs   = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 (
      (VOID *)BitMap,
      Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
      (UINT64)-1
      );
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}

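//
// Illustrative example (not part of the original code): for an Address whose
// bit index within the current 64-bit map entry is 4 and a BitNumber of 3,
// the whole run fits in one entry, so SetBits () only ORs in the mask
// LShiftU64 (LShiftU64 (1, 3) - 1, 4) == 0x70, i.e. bits 4..6 of *BitMap.
//
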
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address     Start address to set for.
  @param[in]  BitNumber   Number of bits to clear.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN  Lsbs;
  UINTN  Qwords;
  UINTN  Msbs;
  UINTN  StartBit;
  UINTN  EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
           GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs   = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs   = BitNumber;
    Lsbs   = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}

/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address     Start address to retrieve bits for.
  @param[in]  BitNumber   Number of bits to get.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN   StartBit;
  UINTN   EndBit;
  UINTN   Lsbs;
  UINTN   Msbs;
  UINT64  Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  if ((StartBit == 0) && (BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS)) {
    Result = *BitMap;
  } else {
    Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
    if (Lsbs > 0) {
      BitMap += 1;
      Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}

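//
// Illustrative example (not part of the original code): reading 4 bits that
// start at bit index 62 of one map entry crosses into the next entry. In that
// case GetBits () takes Msbs == 2 bits (62..63) from the first UINT64 and
// Lsbs == 2 bits (0..1) from the second, returning them packed into bits 0..3
// of the result.
//
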
/**
  Helper function to allocate pages without Guard for internal uses.

  @param[in]  Pages       Page number.

  @return Address of memory allocated.
**/
VOID *
PageAlloc (
  IN UINTN  Pages
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;

  Status = SmmInternalAllocatePages (
             AllocateAnyPages,
             EfiRuntimeServicesData,
             Pages,
             &Memory,
             FALSE
             );
  if (EFI_ERROR (Status)) {
    Memory = 0;
  }

  return (VOID *)(UINTN)Memory;
}

/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  @param[in]  Address       Start address to search the bitmap for.
  @param[in]  AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out] BitMap        Pointer to bitmap which covers the Address.

  @return The number of bits from the given Address to the end of the current
          map table unit.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               AllocMapUnit,
  OUT UINT64                **BitMap
  )
{
  UINTN   Level;
  UINT64  *GuardMap;
  UINT64  MapMemory;
  UINTN   Index;
  UINTN   Size;
  UINTN   BitsToUnitEnd;

  //
  // Adjust current map table depth according to the address to access
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0)
  {
    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap           = MapMemory;
    }

    mMapLevel++;
  }

  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level)
  {
    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size      = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index    &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}

/**
  Set corresponding bits in bitmap table to 1 according to given memory range.

  @param[in]  Address        Memory address to guard from.
  @param[in]  NumberOfPages  Number of pages to guard.

  @return VOID
**/
VOID
EFIAPI
SetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINTN   BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    SetBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Clear corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to clear the guard bits from.
  @param[in]  NumberOfPages  Number of pages to clear.

  @return VOID
**/
VOID
EFIAPI
ClearGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINTN   BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    ClearBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to retrieve from.
  @param[in]  NumberOfPages  Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINTN
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINTN   Result;
  UINTN   Shift;
  UINTN   BitsToUnitEnd;

  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift  = 0;
  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    if (BitMap != NULL) {
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift         += Bits;
    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}

/**
  Get bit value in bitmap table for the given address.

  @param[in]  Address     The address to retrieve for.

  @return 1 or 0.
**/
UINTN
EFIAPI
GetGuardMapBit (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  UINT64  *GuardMap;

  FindGuardedMemoryMap (Address, FALSE, &GuardMap);
  if (GuardMap != NULL) {
    if (RShiftU64 (
          *GuardMap,
          GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)
          ) & 1)
    {
      return 1;
    }
  }

  return 0;
}

/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]  Address     The address to check for.

  @return TRUE  The page at Address is a Guard page.
  @return FALSE The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  UINTN  BitMap;

  //
  // There must be at least one guarded page before and/or after given
  // address if it's a Guard page. The bitmap pattern should be one of
  // 001, 100 and 101
  //
  BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
  return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
}

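//
// Illustrative note (not part of the original code): in the 3-bit window read
// above, bit 0 is the page below Address and bit 2 is the page above it. So
// 001 means Address is a tail Guard, 100 means it is a head Guard, and 101
// means it is a Guard shared by two adjacent guarded allocations.
//
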
/**
  Check to see if the page at the given address is guarded or not.

  @param[in]  Address     The address to check for.

  @return TRUE  The page at Address is guarded.
  @return FALSE The page at Address is not guarded.
**/
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  return (GetGuardMapBit (Address) == 1);
}

/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT.

  @param[in]  BaseAddress     Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
SetGuardPage (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress
  )
{
  EFI_STATUS  Status;

  if (mSmmMemoryAttribute != NULL) {
    mOnGuarding = TRUE;
    Status      = mSmmMemoryAttribute->SetMemoryAttributes (
                                         mSmmMemoryAttribute,
                                         BaseAddress,
                                         EFI_PAGE_SIZE,
                                         EFI_MEMORY_RP
                                         );
    ASSERT_EFI_ERROR (Status);
    mOnGuarding = FALSE;
  }
}

/**
  Restore the Guard page at the given address to normal memory.

  This is done by changing the page table attribute to be PRESENT.

  @param[in]  BaseAddress     Page address to unset Guard at.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress
  )
{
  EFI_STATUS  Status;

  if (mSmmMemoryAttribute != NULL) {
    mOnGuarding = TRUE;
    Status      = mSmmMemoryAttribute->ClearMemoryAttributes (
                                         mSmmMemoryAttribute,
                                         BaseAddress,
                                         EFI_PAGE_SIZE,
                                         EFI_MEMORY_RP
                                         );
    ASSERT_EFI_ERROR (Status);
    mOnGuarding = FALSE;
  }
}

/**
  Check to see if the given type of memory should be guarded or not.

  @param[in] MemoryType      Memory type to check.
  @param[in] AllocateType    Allocation type to check.
  @param[in] PageOrPool      Indicate a page allocation or pool allocation.

  @return TRUE  The given type of memory should be guarded.
  @return FALSE The given type of memory should not be guarded.
**/
BOOLEAN
IsMemoryTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType,
  IN UINT8              PageOrPool
  )
{
  UINT64  TestBit;
  UINT64  ConfigBit;

  if (  ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0)
     || mOnGuarding
     || (AllocateType == AllocateAddress))
  {
    return FALSE;
  }

  ConfigBit = 0;
  if ((PageOrPool & GUARD_HEAP_TYPE_POOL) != 0) {
    ConfigBit |= PcdGet64 (PcdHeapGuardPoolType);
  }

  if ((PageOrPool & GUARD_HEAP_TYPE_PAGE) != 0) {
    ConfigBit |= PcdGet64 (PcdHeapGuardPageType);
  }

  if ((MemoryType == EfiRuntimeServicesData) ||
      (MemoryType == EfiRuntimeServicesCode))
  {
    TestBit = LShiftU64 (1, MemoryType);
  } else if (MemoryType == EfiMaxMemoryType) {
    TestBit = (UINT64)-1;
  } else {
    TestBit = 0;
  }

  return ((ConfigBit & TestBit) != 0);
}

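//
// Illustrative example (not part of the original code): with the bit matching
// GUARD_HEAP_TYPE_PAGE set in PcdHeapGuardPropertyMask and BIT6
// (EfiRuntimeServicesData == 6) set in PcdHeapGuardPageType, a call such as
//
//   IsMemoryTypeToGuard (EfiRuntimeServicesData, AllocateAnyPages, GUARD_HEAP_TYPE_PAGE);
//
// would return TRUE, while AllocateAddress requests are never guarded.
//
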
/**
  Check to see if the given type of pool should be guarded or not.

  @param[in] MemoryType      Pool type to check.

  @return TRUE  The given type of pool should be guarded.
  @return FALSE The given type of pool should not be guarded.
**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE  MemoryType
  )
{
  return IsMemoryTypeToGuard (
           MemoryType,
           AllocateAnyPages,
           GUARD_HEAP_TYPE_POOL
           );
}

/**
  Check to see if the given type of page allocation should be guarded or not.

  @param[in] MemoryType      Page type to check.
  @param[in] AllocateType    Allocation type to check.

  @return TRUE  The given type of page should be guarded.
  @return FALSE The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType
  )
{
  return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
}

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @return TRUE/FALSE.
**/
BOOLEAN
IsHeapGuardEnabled (
  VOID
  )
{
  return IsMemoryTypeToGuard (
           EfiMaxMemoryType,
           AllocateAnyPages,
           GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE
           );
}

/**
  Set head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to set guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;

  //
  // Set tail Guard
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  // Set head Guard
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  //
  // Mark the memory range as Guarded
  //
  SetGuardedMemoryBits (Memory, NumberOfPages);
}

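//
// Illustrative layout (not part of the original code) after
// SetGuardForMemory (Memory, N):
//
//   [head Guard][ page 0 ... page N-1 ][tail Guard]
//    not present   usable, bits = 1     not present
//
// Guard pages already shared with an adjacent guarded allocation are left
// untouched by the IsGuardPage () checks above.
//
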
/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //                -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard as well  (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //                -------------------
  //      Start -> -1    -2
  //
  GuardPage   = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //         --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard as well  (not shared Guard)
  //         X     1 -> Don't free last page  (need a new Guard)
  //                    (it'll be turned into a Guard page later)
  //         --------------------
  //        +1    +0 <- End
  //
  GuardPage   = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits (Memory, NumberOfPages);
}

/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with the
  adjacent memory block if it is still in use, or free it if it is no longer
  shared. Another purpose is to reserve pages as Guard pages when only part
  of an allocation is freed.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Number of pages to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;
  UINT64                Attributes;

  if ((Memory == NULL) || (NumberOfPages == NULL) || (*NumberOfPages == 0)) {
    return;
  }

  Start       = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // In case the memory to free is marked as read-only (e.g. EfiRuntimeServicesCode).
  //
  if (mSmmMemoryAttribute != NULL) {
    Attributes = 0;
    mSmmMemoryAttribute->GetMemoryAttributes (
                           mSmmMemoryAttribute,
                           Start,
                           EFI_PAGES_TO_SIZE (PagesToFree),
                           &Attributes
                           );
    if ((Attributes & EFI_MEMORY_RO) != 0) {
      mSmmMemoryAttribute->ClearMemoryAttributes (
                             mSmmMemoryAttribute,
                             Start,
                             EFI_PAGES_TO_SIZE (PagesToFree),
                             EFI_MEMORY_RO
                             );
    }
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //                -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard as well  (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //                -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //         --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard as well  (not shared Guard)
  //         X     1 -> Don't free last page  (need a new Guard)
  //                    (it'll be turned into a Guard page later)
  //         --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  pool tail or pool head.

  @param[in]  Memory    Base address of memory allocated.
  @param[in]  NoPages   Number of pages actually allocated.
  @param[in]  Size      Size of memory requested,
                        plus pool head/tail overhead.

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  )
{
  if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  Size = ALIGN_VALUE (Size, 8);
  return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
}

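//
// Illustrative example (not part of the original code): with BIT7 of
// PcdHeapGuardPropertyMask clear, NoPages == 1 and Size == 0x28, the pool
// head returned is Memory + 0x1000 - 0x28, so the end of the pool buffer
// touches the tail Guard page and an overflow past it faults immediately.
//
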
/**
  Get the page base address according to pool head address.

  @param[in]  Memory    Head address of pool to free.

  @return Base address of the pages allocated for the pool.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS  Memory
  )
{
  if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
}

/**
  Helper function of memory allocation with Guard pages.

  @param  FreePageList           The free page node.
  @param  NumberOfPages          Number of pages to be allocated.
  @param  MaxAddress             Request to allocate memory below this address.
  @param  MemoryType             Type of memory requested.

  @return Memory address of allocated pages.
**/
UINTN
InternalAllocMaxAddressWithGuard (
  IN OUT LIST_ENTRY       *FreePageList,
  IN     UINTN            NumberOfPages,
  IN     UINTN            MaxAddress,
  IN     EFI_MEMORY_TYPE  MemoryType
  )
{
  LIST_ENTRY      *Node;
  FREE_PAGE_LIST  *Pages;
  UINTN           PagesToAlloc;
  UINTN           HeadGuard;
  UINTN           TailGuard;
  UINTN           Address;

  for (Node = FreePageList->BackLink; Node != FreePageList;
       Node = Node->BackLink)
  {
    Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
    if ((Pages->NumberOfPages >= NumberOfPages) &&
        ((UINTN)Pages + EFI_PAGES_TO_SIZE (NumberOfPages) - 1 <= MaxAddress))
    {
      //
      // We may need 1 or 2 more pages for Guard. Check it out.
      //
      PagesToAlloc = NumberOfPages;
      TailGuard    = (UINTN)Pages + EFI_PAGES_TO_SIZE (Pages->NumberOfPages);
      if (!IsGuardPage (TailGuard)) {
        //
        // Add one if no Guard at the end of current free memory block.
        //
        PagesToAlloc += 1;
        TailGuard     = 0;
      }

      HeadGuard = (UINTN)Pages +
                  EFI_PAGES_TO_SIZE (Pages->NumberOfPages - PagesToAlloc) -
                  EFI_PAGE_SIZE;
      if (!IsGuardPage (HeadGuard)) {
        //
        // Add one if no Guard at the page before the address to allocate
        //
        PagesToAlloc += 1;
        HeadGuard     = 0;
      }

      if (Pages->NumberOfPages < PagesToAlloc) {
        // Not enough space to allocate memory with Guards? Try next block.
        continue;
      }

      Address = InternalAllocPagesOnOneNode (Pages, PagesToAlloc, MaxAddress);
      ConvertSmmMemoryMapEntry (MemoryType, Address, PagesToAlloc, FALSE);
      CoreFreeMemoryMapStack ();
      if (HeadGuard == 0) {
        // Don't pass the Guard page to user.
        Address += EFI_PAGE_SIZE;
      }

      SetGuardForMemory (Address, NumberOfPages);
      return Address;
    }
  }

  return (UINTN)(-1);
}

/**
  Helper function of memory free with Guard pages.

  @param[in]  Memory                 Base address of memory being freed.
  @param[in]  NumberOfPages          The number of pages to free.
  @param[in]  AddRegion              If this memory is a newly added region.

  @retval EFI_NOT_FOUND          Could not find the entry that covers the range.
  @retval EFI_INVALID_PARAMETER  Address not aligned, Address is zero or
                                 NumberOfPages is zero.
  @retval EFI_SUCCESS            Pages successfully freed.
**/
EFI_STATUS
SmmInternalFreePagesExWithGuard (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages,
  IN BOOLEAN               AddRegion
  )
{
  EFI_PHYSICAL_ADDRESS  MemoryToFree;
  UINTN                 PagesToFree;

  if (((Memory & EFI_PAGE_MASK) != 0) || (Memory == 0) || (NumberOfPages == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  MemoryToFree = Memory;
  PagesToFree  = NumberOfPages;

  AdjustMemoryF (&MemoryToFree, &PagesToFree);
  UnsetGuardForMemory (Memory, NumberOfPages);
  if (PagesToFree == 0) {
    return EFI_SUCCESS;
  }

  return SmmInternalFreePagesEx (MemoryToFree, PagesToFree, AddRegion);
}

1093
1094 /**
1095 Set all Guard pages which cannot be set during the non-SMM mode time.
1096 **/
1097 VOID
1098 SetAllGuardPages (
1099 VOID
1100 )
1101 {
1102 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1103 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1104 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1105 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1106 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1107 UINT64 TableEntry;
1108 UINT64 Address;
1109 UINT64 GuardPage;
1110 INTN Level;
1111 UINTN Index;
1112 BOOLEAN OnGuarding;
1113
1114 if ((mGuardedMemoryMap == 0) ||
1115 (mMapLevel == 0) ||
1116 (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
1117 {
1118 return;
1119 }
1120
1121 CopyMem (Entries, mLevelMask, sizeof (Entries));
1122 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1123
1124 SetMem (Tables, sizeof (Tables), 0);
1125 SetMem (Addresses, sizeof (Addresses), 0);
1126 SetMem (Indices, sizeof (Indices), 0);
1127
1128 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1129 Tables[Level] = mGuardedMemoryMap;
1130 Address = 0;
1131 OnGuarding = FALSE;
1132
1133 DEBUG_CODE (
1134 DumpGuardedMemoryBitmap ();
1135 );
1136
1137 while (TRUE) {
1138 if (Indices[Level] > Entries[Level]) {
1139 Tables[Level] = 0;
1140 Level -= 1;
1141 } else {
1142 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
1143 Address = Addresses[Level];
1144
1145 if (TableEntry == 0) {
1146 OnGuarding = FALSE;
1147 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1148 Level += 1;
1149 Tables[Level] = TableEntry;
1150 Addresses[Level] = Address;
1151 Indices[Level] = 0;
1152
1153 continue;
1154 } else {
1155 Index = 0;
1156 while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
1157 if ((TableEntry & 1) == 1) {
1158 if (OnGuarding) {
1159 GuardPage = 0;
1160 } else {
1161 GuardPage = Address - EFI_PAGE_SIZE;
1162 }
1163
1164 OnGuarding = TRUE;
1165 } else {
1166 if (OnGuarding) {
1167 GuardPage = Address;
1168 } else {
1169 GuardPage = 0;
1170 }
1171
1172 OnGuarding = FALSE;
1173 }
1174
1175 if (GuardPage != 0) {
1176 SetGuardPage (GuardPage);
1177 }
1178
1179 if (TableEntry == 0) {
1180 break;
1181 }
1182
1183 TableEntry = RShiftU64 (TableEntry, 1);
1184 Address += EFI_PAGE_SIZE;
1185 Index += 1;
1186 }
1187 }
1188 }
1189
1190 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1191 break;
1192 }
1193
1194 Indices[Level] += 1;
1195 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1196 Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
1197 }
1198 }
1199
/**
  Hook function used to set all Guard pages after entering SMM mode.
**/
VOID
SmmEntryPointMemoryManagementHook (
  VOID
  )
{
  EFI_STATUS  Status;

  if (mSmmMemoryAttribute == NULL) {
    Status = SmmLocateProtocol (
               &gEdkiiSmmMemoryAttributeProtocolGuid,
               NULL,
               (VOID **)&mSmmMemoryAttribute
               );
    if (!EFI_ERROR (Status)) {
      SetAllGuardPages ();
    }
  }
}

/**
  Helper function to convert a UINT64 value into a binary string.

  @param[in]  Value       Value of a UINT64 integer.
  @param[out] BinString   String buffer to contain the conversion result.
                          It must be at least 65 bytes long to hold the 64
                          digits and the terminating null.

  @return VOID.
**/
VOID
Uint64ToBinString (
  IN  UINT64  Value,
  OUT CHAR8   *BinString
  )
{
  UINTN  Index;

  if (BinString == NULL) {
    return;
  }

  for (Index = 64; Index > 0; --Index) {
    BinString[Index - 1] = '0' + (Value & 1);
    Value                = RShiftU64 (Value, 1);
  }

  BinString[64] = '\0';
}

/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN   Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  TableEntry;
  UINT64  Address;
  INTN    Level;
  UINTN   RepeatZero;
  CHAR8   String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8   *Ruler1;
  CHAR8   *Ruler2;

  if ((mGuardedMemoryMap == 0) ||
      (mMapLevel == 0) ||
      (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
  {
    return;
  }

  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((
    HEAP_GUARD_DEBUG_LEVEL,
    "============================="
    " Guarded Memory Bitmap "
    "==============================\r\n"
    ));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof (Indices), 0);
  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);

  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));
    } else {
      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          if (RepeatZero == 0) {
            Uint64ToBinString (TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
          }

          RepeatZero += 1;
        }
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level           += 1;
        Tables[Level]    = TableEntry;
        Addresses[Level] = Address;
        Indices[Level]   = 0;
        RepeatZero       = 0;

        continue;
      } else {
        RepeatZero = 0;
        Uint64ToBinString (TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}

1353
1354 /**
1355 Debug function used to verify if the Guard page is well set or not.
1356
1357 @param[in] BaseAddress Address of memory to check.
1358 @param[in] NumberOfPages Size of memory in pages.
1359
1360 @return TRUE The head Guard and tail Guard are both well set.
1361 @return FALSE The head Guard and/or tail Guard are not well set.
1362 **/
1363 BOOLEAN
1364 VerifyMemoryGuard (
1365 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1366 IN UINTN NumberOfPages
1367 )
1368 {
1369 EFI_STATUS Status;
1370 UINT64 Attribute;
1371 EFI_PHYSICAL_ADDRESS Address;
1372
1373 if (mSmmMemoryAttribute == NULL) {
1374 return TRUE;
1375 }
1376
1377 Attribute = 0;
1378 Address = BaseAddress - EFI_PAGE_SIZE;
1379 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1380 mSmmMemoryAttribute,
1381 Address,
1382 EFI_PAGE_SIZE,
1383 &Attribute
1384 );
1385 if (EFI_ERROR (Status) || ((Attribute & EFI_MEMORY_RP) == 0)) {
1386 DEBUG ((
1387 DEBUG_ERROR,
1388 "Head Guard is not set at: %016lx (%016lX)!!!\r\n",
1389 Address,
1390 Attribute
1391 ));
1392 DumpGuardedMemoryBitmap ();
1393 return FALSE;
1394 }
1395
1396 Attribute = 0;
1397 Address = BaseAddress + EFI_PAGES_TO_SIZE (NumberOfPages);
1398 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1399 mSmmMemoryAttribute,
1400 Address,
1401 EFI_PAGE_SIZE,
1402 &Attribute
1403 );
1404 if (EFI_ERROR (Status) || ((Attribute & EFI_MEMORY_RP) == 0)) {
1405 DEBUG ((
1406 DEBUG_ERROR,
1407 "Tail Guard is not set at: %016lx (%016lX)!!!\r\n",
1408 Address,
1409 Attribute
1410 ));
1411 DumpGuardedMemoryBitmap ();
1412 return FALSE;
1413 }
1414
1415 return TRUE;
1416 }