1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "HeapGuard.h"
16
17 //
18 // Global to avoid infinite re-entry into memory allocation when updating
19 // page table attributes, which may need to allocate pages for new PDE/PTE.
20 //
21 GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;
22
23 //
24 // Pointer to the table tracking Guarded memory with a bitmap, in which '1'
25 // indicates guarded memory. '0' might be free memory or a Guard page
26 // itself, depending on the status of the memory adjacent to it.
27 //
28 GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;
29
30 //
31 // Current depth level of the map table pointed to by mGuardedMemoryMap.
32 // mMapLevel must be initialized to at least 1. It will be updated
33 // automatically according to the address of the memory just tracked.
34 //
35 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;
36
37 //
38 // Shift and mask for each level of map table
39 //
40 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
41 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
42 GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
44
45 //
46 // SMM memory attribute protocol
47 //
48 EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *mSmmMemoryAttribute = NULL;
49
50 /**
51 Set corresponding bits in bitmap table to 1 according to the address.
52
53 @param[in] Address Start address to set for.
54 @param[in] BitNumber Number of bits to set.
55 @param[in] BitMap Pointer to bitmap which covers the Address.
56
57 @return VOID
58 **/
59 STATIC
60 VOID
61 SetBits (
62 IN EFI_PHYSICAL_ADDRESS Address,
63 IN UINTN BitNumber,
64 IN UINT64 *BitMap
65 )
66 {
67 UINTN Lsbs;
68 UINTN Qwords;
69 UINTN Msbs;
70 UINTN StartBit;
71 UINTN EndBit;
72
73 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
74 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
75
76 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
77 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
78 GUARDED_HEAP_MAP_ENTRY_BITS;
79 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
80 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
81 } else {
82 Msbs = BitNumber;
83 Lsbs = 0;
84 Qwords = 0;
85 }
86
87 if (Msbs > 0) {
88 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
89 BitMap += 1;
90 }
91
92 if (Qwords > 0) {
93 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
94 (UINT64)-1);
95 BitMap += Qwords;
96 }
97
98 if (Lsbs > 0) {
99 *BitMap |= (LShiftU64 (1, Lsbs) - 1);
100 }
101 }
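//
// Worked example (illustrative, assuming GUARDED_HEAP_MAP_ENTRY_BITS is 64,
// i.e. one UINT64 per map word): with StartBit = 60 and BitNumber = 70,
//   Msbs   = (64 - 60) % 64    = 4  -> sets bits 60..63 of BitMap[0]
//   Qwords = (70 - 4) / 64     = 1  -> fills all 64 bits of BitMap[1]
//   Lsbs   = (EndBit + 1) % 64 = 2  -> sets bits 0..1 of BitMap[2]
// covering exactly 4 + 64 + 2 = 70 bits in total.
//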
102
103 /**
104 Set corresponding bits in bitmap table to 0 according to the address.
105
106 @param[in] Address Start address to set for.
107 @param[in] BitNumber Number of bits to clear.
108 @param[in] BitMap Pointer to bitmap which covers the Address.
109
110 @return VOID.
111 **/
112 STATIC
113 VOID
114 ClearBits (
115 IN EFI_PHYSICAL_ADDRESS Address,
116 IN UINTN BitNumber,
117 IN UINT64 *BitMap
118 )
119 {
120 UINTN Lsbs;
121 UINTN Qwords;
122 UINTN Msbs;
123 UINTN StartBit;
124 UINTN EndBit;
125
126 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
127 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
128
129 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
130 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
131 GUARDED_HEAP_MAP_ENTRY_BITS;
132 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
133 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
134 } else {
135 Msbs = BitNumber;
136 Lsbs = 0;
137 Qwords = 0;
138 }
139
140 if (Msbs > 0) {
141 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
142 BitMap += 1;
143 }
144
145 if (Qwords > 0) {
146 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
147 BitMap += Qwords;
148 }
149
150 if (Lsbs > 0) {
151 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
152 }
153 }
154
155 /**
156 Get corresponding bits in bitmap table according to the address.
157
158 The value of bit 0 corresponds to the status of memory at the given Address.
159 No more than 64 bits can be retrieved in one call.
160
161 @param[in] Address Start address to retrieve bits for.
162 @param[in] BitNumber Number of bits to get.
163 @param[in] BitMap Pointer to bitmap which covers the Address.
164
165 @return An integer containing the bits information.
166 **/
167 STATIC
168 UINT64
169 GetBits (
170 IN EFI_PHYSICAL_ADDRESS Address,
171 IN UINTN BitNumber,
172 IN UINT64 *BitMap
173 )
174 {
175 UINTN StartBit;
176 UINTN EndBit;
177 UINTN Lsbs;
178 UINTN Msbs;
179 UINT64 Result;
180
181 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
182
183 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
184 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
185
186 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
187 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
188 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
189 } else {
190 Msbs = BitNumber;
191 Lsbs = 0;
192 }
193
194 if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
195 Result = *BitMap;
196 } else {
197 Result = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
198 if (Lsbs > 0) {
199 BitMap += 1;
200 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
201 }
202 }
203
204 return Result;
205 }
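//
// Example of a read crossing a map word boundary (illustrative, again assuming
// 64-bit map words): with StartBit = 62 and BitNumber = 4, Msbs = 2 and
// Lsbs = 2, so bits 62..63 of BitMap[0] become result bits 0..1 and bits 0..1
// of BitMap[1] become result bits 2..3.
//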
206
207 /**
208 Helper function to allocate pages without Guard pages, for internal use.
209 
210 @param[in] Pages Number of pages to allocate.
211
212 @return Address of memory allocated.
213 **/
214 VOID *
215 PageAlloc (
216 IN UINTN Pages
217 )
218 {
219 EFI_STATUS Status;
220 EFI_PHYSICAL_ADDRESS Memory;
221
222 Status = SmmInternalAllocatePages (AllocateAnyPages, EfiRuntimeServicesData,
223 Pages, &Memory, FALSE);
224 if (EFI_ERROR (Status)) {
225 Memory = 0;
226 }
227
228 return (VOID *)(UINTN)Memory;
229 }
230
231 /**
232 Locate the pointer to the bitmap which covers the given Address, within the
233 guarded memory bitmap tables.
234
235 @param[in] Address Start address to search the bitmap for.
236 @param[in] AllocMapUnit Flag to indicate memory allocation for the table.
237 @param[out] BitMap Pointer to bitmap which covers the Address.
238
239 @return The bit number from given Address to the end of current map table.
240 **/
241 UINTN
242 FindGuardedMemoryMap (
243 IN EFI_PHYSICAL_ADDRESS Address,
244 IN BOOLEAN AllocMapUnit,
245 OUT UINT64 **BitMap
246 )
247 {
248 UINTN Level;
249 UINT64 *GuardMap;
250 UINT64 MapMemory;
251 UINTN Index;
252 UINTN Size;
253 UINTN BitsToUnitEnd;
254
255 //
256 // Adjust current map table depth according to the address to access
257 //
258 while (AllocMapUnit &&
259 mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
260 RShiftU64 (
261 Address,
262 mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
263 ) != 0) {
264
265 if (mGuardedMemoryMap != 0) {
266 Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
267 * GUARDED_HEAP_MAP_ENTRY_BYTES;
268 MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
269 ASSERT (MapMemory != 0);
270
271 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
272
273 *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
274 mGuardedMemoryMap = MapMemory;
275 }
276
277 mMapLevel++;
278
279 }
280
281 GuardMap = &mGuardedMemoryMap;
282 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
283 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
284 ++Level) {
285
286 if (*GuardMap == 0) {
287 if (!AllocMapUnit) {
288 GuardMap = NULL;
289 break;
290 }
291
292 Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
293 MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
294 ASSERT (MapMemory != 0);
295
296 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
297 *GuardMap = MapMemory;
298 }
299
300 Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
301 Index &= mLevelMask[Level];
302 GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
303
304 }
305
306 BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
307 *BitMap = GuardMap;
308
309 return BitsToUnitEnd;
310 }
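//
// Usage sketch (illustrative): because one call only returns the number of
// bits left up to the end of the current map unit, callers loop until the
// whole page range is consumed, e.g.:
//
//   BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
//   Bits          = MIN (NumberOfPages, BitsToUnitEnd);
//   SetBits (Address, Bits, BitMap);
//
// which is the pattern followed by SetGuardedMemoryBits() and
// ClearGuardedMemoryBits() below.
//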
311
312 /**
313 Set corresponding bits in bitmap table to 1 according to given memory range.
314
315 @param[in] Address Memory address to guard from.
316 @param[in] NumberOfPages Number of pages to guard.
317
318 @return VOID
319 **/
320 VOID
321 EFIAPI
322 SetGuardedMemoryBits (
323 IN EFI_PHYSICAL_ADDRESS Address,
324 IN UINTN NumberOfPages
325 )
326 {
327 UINT64 *BitMap;
328 UINTN Bits;
329 UINTN BitsToUnitEnd;
330
331 while (NumberOfPages > 0) {
332 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
333 ASSERT (BitMap != NULL);
334
335 if (NumberOfPages > BitsToUnitEnd) {
336 // Cross map unit
337 Bits = BitsToUnitEnd;
338 } else {
339 Bits = NumberOfPages;
340 }
341
342 SetBits (Address, Bits, BitMap);
343
344 NumberOfPages -= Bits;
345 Address += EFI_PAGES_TO_SIZE (Bits);
346 }
347 }
348
349 /**
350 Clear corresponding bits in bitmap table according to given memory range.
351
352 @param[in] Address Memory address to unset from.
353 @param[in] NumberOfPages Number of pages to unset guard.
354
355 @return VOID
356 **/
357 VOID
358 EFIAPI
359 ClearGuardedMemoryBits (
360 IN EFI_PHYSICAL_ADDRESS Address,
361 IN UINTN NumberOfPages
362 )
363 {
364 UINT64 *BitMap;
365 UINTN Bits;
366 UINTN BitsToUnitEnd;
367
368 while (NumberOfPages > 0) {
369 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
370 ASSERT (BitMap != NULL);
371
372 if (NumberOfPages > BitsToUnitEnd) {
373 // Cross map unit
374 Bits = BitsToUnitEnd;
375 } else {
376 Bits = NumberOfPages;
377 }
378
379 ClearBits (Address, Bits, BitMap);
380
381 NumberOfPages -= Bits;
382 Address += EFI_PAGES_TO_SIZE (Bits);
383 }
384 }
385
386 /**
387 Retrieve corresponding bits in bitmap table according to given memory range.
388
389 @param[in] Address Memory address to retrieve from.
390 @param[in] NumberOfPages Number of pages to retrieve.
391
392 @return An integer containing the guarded memory bitmap.
393 **/
394 UINTN
395 GetGuardedMemoryBits (
396 IN EFI_PHYSICAL_ADDRESS Address,
397 IN UINTN NumberOfPages
398 )
399 {
400 UINT64 *BitMap;
401 UINTN Bits;
402 UINTN Result;
403 UINTN Shift;
404 UINTN BitsToUnitEnd;
405
406 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
407
408 Result = 0;
409 Shift = 0;
410 while (NumberOfPages > 0) {
411 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
412
413 if (NumberOfPages > BitsToUnitEnd) {
414 // Cross map unit
415 Bits = BitsToUnitEnd;
416 } else {
417 Bits = NumberOfPages;
418 }
419
420 if (BitMap != NULL) {
421 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
422 }
423
424 Shift += Bits;
425 NumberOfPages -= Bits;
426 Address += EFI_PAGES_TO_SIZE (Bits);
427 }
428
429 return Result;
430 }
431
432 /**
433 Get bit value in bitmap table for the given address.
434
435 @param[in] Address The address to retrieve for.
436
437 @return 1 or 0.
438 **/
439 UINTN
440 EFIAPI
441 GetGuardMapBit (
442 IN EFI_PHYSICAL_ADDRESS Address
443 )
444 {
445 UINT64 *GuardMap;
446
447 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
448 if (GuardMap != NULL) {
449 if (RShiftU64 (*GuardMap,
450 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
451 return 1;
452 }
453 }
454
455 return 0;
456 }
457
458
459 /**
460 Check to see if the page at the given address is a Guard page or not.
461
462 @param[in] Address The address to check for.
463
464 @return TRUE The page at Address is a Guard page.
465 @return FALSE The page at Address is not a Guard page.
466 **/
467 BOOLEAN
468 EFIAPI
469 IsGuardPage (
470 IN EFI_PHYSICAL_ADDRESS Address
471 )
472 {
473 UINTN BitMap;
474
475 //
476 // There must be at least one guarded page before and/or after given
477 // address if it's a Guard page. The bitmap pattern should be one of
478 // 001, 100 and 101
479 //
480 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
481 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
482 }
483
484
485
486 /**
487 Check to see if the page at the given address is guarded or not.
488
489 @param[in] Address The address to check for.
490
491 @return TRUE The page at Address is guarded.
492 @return FALSE The page at Address is not guarded.
493 **/
494 BOOLEAN
495 EFIAPI
496 IsMemoryGuarded (
497 IN EFI_PHYSICAL_ADDRESS Address
498 )
499 {
500 return (GetGuardMapBit (Address) == 1);
501 }
502
503 /**
504 Set the page at the given address to be a Guard page.
505
506 This is done by changing the page table attribute to be NOT PRESENT.
507
508 @param[in] BaseAddress Page address to Guard at.
509
510 @return VOID.
511 **/
512 VOID
513 EFIAPI
514 SetGuardPage (
515 IN EFI_PHYSICAL_ADDRESS BaseAddress
516 )
517 {
518 EFI_STATUS Status;
519
520 if (mSmmMemoryAttribute != NULL) {
521 mOnGuarding = TRUE;
522 Status = mSmmMemoryAttribute->SetMemoryAttributes (
523 mSmmMemoryAttribute,
524 BaseAddress,
525 EFI_PAGE_SIZE,
526 EFI_MEMORY_RP
527 );
528 ASSERT_EFI_ERROR (Status);
529 mOnGuarding = FALSE;
530 }
531 }
532
533 /**
534 Unset the Guard page at the given address, restoring it to normal memory.
535 
536 This is done by changing the page table attribute to be PRESENT.
537 
538 @param[in] BaseAddress Page address to unset the Guard at.
539
540 @return VOID.
541 **/
542 VOID
543 EFIAPI
544 UnsetGuardPage (
545 IN EFI_PHYSICAL_ADDRESS BaseAddress
546 )
547 {
548 EFI_STATUS Status;
549
550 if (mSmmMemoryAttribute != NULL) {
551 mOnGuarding = TRUE;
552 Status = mSmmMemoryAttribute->ClearMemoryAttributes (
553 mSmmMemoryAttribute,
554 BaseAddress,
555 EFI_PAGE_SIZE,
556 EFI_MEMORY_RP
557 );
558 ASSERT_EFI_ERROR (Status);
559 mOnGuarding = FALSE;
560 }
561 }
562
563 /**
564 Check to see if memory of the given type should be guarded or not.
565
566 @param[in] MemoryType Memory type to check.
567 @param[in] AllocateType Allocation type to check.
568 @param[in] PageOrPool Indicate a page allocation or pool allocation.
569
570
571 @return TRUE The given type of memory should be guarded.
572 @return FALSE The given type of memory should not be guarded.
573 **/
574 BOOLEAN
575 IsMemoryTypeToGuard (
576 IN EFI_MEMORY_TYPE MemoryType,
577 IN EFI_ALLOCATE_TYPE AllocateType,
578 IN UINT8 PageOrPool
579 )
580 {
581 UINT64 TestBit;
582 UINT64 ConfigBit;
583
584 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0
585 || mOnGuarding
586 || AllocateType == AllocateAddress) {
587 return FALSE;
588 }
589
590 ConfigBit = 0;
591 if ((PageOrPool & GUARD_HEAP_TYPE_POOL) != 0) {
592 ConfigBit |= PcdGet64 (PcdHeapGuardPoolType);
593 }
594
595 if ((PageOrPool & GUARD_HEAP_TYPE_PAGE) != 0) {
596 ConfigBit |= PcdGet64 (PcdHeapGuardPageType);
597 }
598
599 if (MemoryType == EfiRuntimeServicesData ||
600 MemoryType == EfiRuntimeServicesCode) {
601 TestBit = LShiftU64 (1, MemoryType);
602 } else if (MemoryType == EfiMaxMemoryType) {
603 TestBit = (UINT64)-1;
604 } else {
605 TestBit = 0;
606 }
607
608 return ((ConfigBit & TestBit) != 0);
609 }
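//
// Example (illustrative): TestBit is LShiftU64 (1, MemoryType), so setting
// PcdHeapGuardPageType to BIT6 enables page guarding for EfiRuntimeServicesData
// allocations and BIT5 covers EfiRuntimeServicesCode; this function returns
// FALSE for all other memory types, except the EfiMaxMemoryType wildcard used
// to test whether guarding is enabled at all.
//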
610
611 /**
612 Check to see if a pool allocation of the given type should be guarded or not.
613
614 @param[in] MemoryType Pool type to check.
615
616
617 @return TRUE The given type of pool should be guarded.
618 @return FALSE The given type of pool should not be guarded.
619 **/
620 BOOLEAN
621 IsPoolTypeToGuard (
622 IN EFI_MEMORY_TYPE MemoryType
623 )
624 {
625 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
626 GUARD_HEAP_TYPE_POOL);
627 }
628
629 /**
630 Check to see if a page allocation of the given type should be guarded or not.
631
632 @param[in] MemoryType Page type to check.
633 @param[in] AllocateType Allocation type to check.
634
635 @return TRUE The given type of page should be guarded.
636 @return FALSE The given type of page should not be guarded.
637 **/
638 BOOLEAN
639 IsPageTypeToGuard (
640 IN EFI_MEMORY_TYPE MemoryType,
641 IN EFI_ALLOCATE_TYPE AllocateType
642 )
643 {
644 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
645 }
646
647 /**
648 Check to see if the heap guard is enabled for page and/or pool allocation.
649
650 @return TRUE if heap guard is enabled for page and/or pool allocation; FALSE otherwise.
651 **/
652 BOOLEAN
653 IsHeapGuardEnabled (
654 VOID
655 )
656 {
657 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
658 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
659 }
660
661 /**
662 Set head Guard and tail Guard for the given memory range.
663
664 @param[in] Memory Base address of memory to set guard for.
665 @param[in] NumberOfPages Memory size in pages.
666
667 @return VOID.
668 **/
669 VOID
670 SetGuardForMemory (
671 IN EFI_PHYSICAL_ADDRESS Memory,
672 IN UINTN NumberOfPages
673 )
674 {
675 EFI_PHYSICAL_ADDRESS GuardPage;
676
677 //
678 // Set tail Guard
679 //
680 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
681 if (!IsGuardPage (GuardPage)) {
682 SetGuardPage (GuardPage);
683 }
684
685 // Set head Guard
686 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
687 if (!IsGuardPage (GuardPage)) {
688 SetGuardPage (GuardPage);
689 }
690
691 //
692 // Mark the memory range as Guarded
693 //
694 SetGuardedMemoryBits (Memory, NumberOfPages);
695 }
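//
// Resulting layout for a guarded range of N pages (sketch):
//
//   [head Guard][ N pages marked as guarded in the bitmap ][tail Guard]
//    not-present                                            not-present
//
// An existing Guard page shared with an adjacent guarded range is reused
// rather than set again.
//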
696
697 /**
698 Unset head Guard and tail Guard for the given memory range.
699
700 @param[in] Memory Base address of memory to unset guard for.
701 @param[in] NumberOfPages Memory size in pages.
702
703 @return VOID.
704 **/
705 VOID
706 UnsetGuardForMemory (
707 IN EFI_PHYSICAL_ADDRESS Memory,
708 IN UINTN NumberOfPages
709 )
710 {
711 EFI_PHYSICAL_ADDRESS GuardPage;
712 UINT64 GuardBitmap;
713
714 if (NumberOfPages == 0) {
715 return;
716 }
717
718 //
719 // Head Guard must be one page before, if any.
720 //
721 // MSB-> 1 0 <-LSB
722 // -------------------
723 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
724 // Head Guard -> 0 0 -> Free Head Guard (not shared Guard)
725 // 1 X -> Don't free first page (need a new Guard)
726 // (it'll be turned into a Guard page later)
727 // -------------------
728 // Start -> -1 -2
729 //
730 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
731 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
732 if ((GuardBitmap & BIT1) == 0) {
733 //
734 // Head Guard exists.
735 //
736 if ((GuardBitmap & BIT0) == 0) {
737 //
738 // If the head Guard is not a tail Guard of adjacent memory block,
739 // unset it.
740 //
741 UnsetGuardPage (GuardPage);
742 }
743 } else {
744 //
745 // Pages before memory to free are still in Guard. It's a partial free
746 // case. Turn first page of memory block to free into a new Guard.
747 //
748 SetGuardPage (Memory);
749 }
750
751 //
752 // Tail Guard must be the page after this memory block to free, if any.
753 //
754 // MSB-> 1 0 <-LSB
755 // --------------------
756 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
757 // 0 0 <- Tail Guard -> Free Tail Guard (not shared Guard)
758 // X 1 -> Don't free last page (need a new Guard)
759 // (it'll be turned into a Guard page later)
760 // --------------------
761 // +1 +0 <- End
762 //
763 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
764 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
765 if ((GuardBitmap & BIT0) == 0) {
766 //
767 // Tail Guard exists.
768 //
769 if ((GuardBitmap & BIT1) == 0) {
770 //
771 // If the tail Guard is not a head Guard of adjacent memory block,
772 // free it; otherwise, keep it.
773 //
774 UnsetGuardPage (GuardPage);
775 }
776 } else {
777 //
778 // Pages after memory to free are still in Guard. It's a partial free
779 // case. We need to keep one page to be a head Guard.
780 //
781 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
782 }
783
784 //
785 // No matter what, we just clear the mark of the Guarded memory.
786 //
787 ClearGuardedMemoryBits(Memory, NumberOfPages);
788 }
789
790
791
792 /**
793 Adjust the start address and number of pages to free according to Guard.
794
795 The purpose of this function is to keep the shared Guard page with the
796 adjacent memory block if it is still guarded, or free it if it is no longer
797 shared. It also reserves pages as new Guard pages in a partial free situation.
798
799 @param[in,out] Memory Base address of memory to free.
800 @param[in,out] NumberOfPages Size of memory to free.
801
802 @return VOID.
803 **/
804 VOID
805 AdjustMemoryF (
806 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
807 IN OUT UINTN *NumberOfPages
808 )
809 {
810 EFI_PHYSICAL_ADDRESS Start;
811 EFI_PHYSICAL_ADDRESS MemoryToTest;
812 UINTN PagesToFree;
813 UINT64 GuardBitmap;
814 UINT64 Attributes;
815
816 if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
817 return;
818 }
819
820 Start = *Memory;
821 PagesToFree = *NumberOfPages;
822
823 //
824 // Clear the read-only attribute in case the memory to free is marked as read-only (e.g. EfiRuntimeServicesCode).
825 //
826 if (mSmmMemoryAttribute != NULL) {
827 Attributes = 0;
828 mSmmMemoryAttribute->GetMemoryAttributes (
829 mSmmMemoryAttribute,
830 Start,
831 EFI_PAGES_TO_SIZE (PagesToFree),
832 &Attributes
833 );
834 if ((Attributes & EFI_MEMORY_RO) != 0) {
835 mSmmMemoryAttribute->ClearMemoryAttributes (
836 mSmmMemoryAttribute,
837 Start,
838 EFI_PAGES_TO_SIZE (PagesToFree),
839 EFI_MEMORY_RO
840 );
841 }
842 }
843
844 //
845 // Head Guard must be one page before, if any.
846 //
847 // MSB-> 1 0 <-LSB
848 // -------------------
849 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
850 // Head Guard -> 0 0 -> Free Head Guard (not shared Guard)
851 // 1 X -> Don't free first page (need a new Guard)
852 // (it'll be turned into a Guard page later)
853 // -------------------
854 // Start -> -1 -2
855 //
856 MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
857 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
858 if ((GuardBitmap & BIT1) == 0) {
859 //
860 // Head Guard exists.
861 //
862 if ((GuardBitmap & BIT0) == 0) {
863 //
864 // If the head Guard is not a tail Guard of adjacent memory block,
865 // free it; otherwise, keep it.
866 //
867 Start -= EFI_PAGES_TO_SIZE (1);
868 PagesToFree += 1;
869 }
870 } else {
871 //
872 // No Head Guard, and pages before memory to free are still in Guard. It's a
873 // partial free case. We need to keep one page to be a tail Guard.
874 //
875 Start += EFI_PAGES_TO_SIZE (1);
876 PagesToFree -= 1;
877 }
878
879 //
880 // Tail Guard must be the page after this memory block to free, if any.
881 //
882 // MSB-> 1 0 <-LSB
883 // --------------------
884 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
885 // 0 0 <- Tail Guard -> Free Tail Guard (not shared Guard)
886 // X 1 -> Don't free last page (need a new Guard)
887 // (it'll be turned into a Guard page later)
888 // --------------------
889 // +1 +0 <- End
890 //
891 MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
892 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
893 if ((GuardBitmap & BIT0) == 0) {
894 //
895 // Tail Guard exists.
896 //
897 if ((GuardBitmap & BIT1) == 0) {
898 //
899 // If the tail Guard is not a head Guard of adjacent memory block,
900 // free it; otherwise, keep it.
901 //
902 PagesToFree += 1;
903 }
904 } else if (PagesToFree > 0) {
905 //
906 // No Tail Guard, and pages after memory to free are still in Guard. It's a
907 // partial free case. We need to keep one page to be a head Guard.
908 //
909 PagesToFree -= 1;
910 }
911
912 *Memory = Start;
913 *NumberOfPages = PagesToFree;
914 }
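//
// Worked example (illustrative): for an isolated guarded block laid out as
// [Guard][P0..P3][Guard], freeing all four pages expands the range to include
// both unshared Guard pages (*NumberOfPages becomes 6 and *Memory moves back
// one page). Freeing only P1..P2 from the middle instead shrinks the range to
// 0 pages, since one page must be kept as a new tail Guard and the other as a
// new head Guard.
//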
915
916
917 /**
918 Adjust the pool head position to make sure the Guard page is adjacent to
919 pool tail or pool head.
920
921 @param[in] Memory Base address of memory allocated.
922 @param[in] NoPages Number of pages actually allocated.
923 @param[in] Size Size of memory requested.
924 (plus pool head/tail overhead)
925
926 @return Address of pool head
927 **/
928 VOID *
929 AdjustPoolHeadA (
930 IN EFI_PHYSICAL_ADDRESS Memory,
931 IN UINTN NoPages,
932 IN UINTN Size
933 )
934 {
935 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
936 //
937 // Pool head is put near the head Guard
938 //
939 return (VOID *)(UINTN)Memory;
940 }
941
942 //
943 // Pool head is put near the tail Guard
944 //
945 Size = ALIGN_VALUE (Size, 8);
946 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
947 }
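//
// Example (illustrative, with BIT7 of PcdHeapGuardPropertyMask clear, i.e. the
// pool head placed near the tail Guard): for NoPages = 1 and Size = 100, Size
// is aligned up to 104 and the returned pool head is Memory + 4096 - 104 =
// Memory + 0xF98, so an overflow past the end of the pool buffer immediately
// hits the not-present tail Guard page.
//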
948
949 /**
950 Get the page base address according to pool head address.
951
952 @param[in] Memory Head address of pool to free.
953
954 @return The page base address of the pool.
955 **/
956 VOID *
957 AdjustPoolHeadF (
958 IN EFI_PHYSICAL_ADDRESS Memory
959 )
960 {
961 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
962 //
963 // Pool head is put near the head Guard
964 //
965 return (VOID *)(UINTN)Memory;
966 }
967
968 //
969 // Pool head is put near the tail Guard
970 //
971 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
972 }
973
974 /**
975 Helper function of memory allocation with Guard pages.
976
977 @param FreePageList The free page list to allocate from.
978 @param NumberOfPages Number of pages to be allocated.
979 @param MaxAddress Request to allocate memory below this address.
980 @param MemoryType Type of memory requested.
981
982 @return Memory address of allocated pages.
983 **/
984 UINTN
985 InternalAllocMaxAddressWithGuard (
986 IN OUT LIST_ENTRY *FreePageList,
987 IN UINTN NumberOfPages,
988 IN UINTN MaxAddress,
989 IN EFI_MEMORY_TYPE MemoryType
990
991 )
992 {
993 LIST_ENTRY *Node;
994 FREE_PAGE_LIST *Pages;
995 UINTN PagesToAlloc;
996 UINTN HeadGuard;
997 UINTN TailGuard;
998 UINTN Address;
999
1000 for (Node = FreePageList->BackLink; Node != FreePageList;
1001 Node = Node->BackLink) {
1002 Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
1003 if (Pages->NumberOfPages >= NumberOfPages &&
1004 (UINTN)Pages + EFI_PAGES_TO_SIZE (NumberOfPages) - 1 <= MaxAddress) {
1005
1006 //
1007 // We may need 1 or 2 more pages for Guard. Check it out.
1008 //
1009 PagesToAlloc = NumberOfPages;
1010 TailGuard = (UINTN)Pages + EFI_PAGES_TO_SIZE (Pages->NumberOfPages);
1011 if (!IsGuardPage (TailGuard)) {
1012 //
1013 // Add one if no Guard at the end of current free memory block.
1014 //
1015 PagesToAlloc += 1;
1016 TailGuard = 0;
1017 }
1018
1019 HeadGuard = (UINTN)Pages +
1020 EFI_PAGES_TO_SIZE (Pages->NumberOfPages - PagesToAlloc) -
1021 EFI_PAGE_SIZE;
1022 if (!IsGuardPage (HeadGuard)) {
1023 //
1024 // Add one if no Guard at the page before the address to allocate
1025 //
1026 PagesToAlloc += 1;
1027 HeadGuard = 0;
1028 }
1029
1030 if (Pages->NumberOfPages < PagesToAlloc) {
1031 // Not enough space to allocate memory with Guards? Try next block.
1032 continue;
1033 }
1034
1035 Address = InternalAllocPagesOnOneNode (Pages, PagesToAlloc, MaxAddress);
1036 ConvertSmmMemoryMapEntry(MemoryType, Address, PagesToAlloc, FALSE);
1037 CoreFreeMemoryMapStack();
1038 if (HeadGuard == 0) {
1039 // Don't pass the Guard page to user.
1040 Address += EFI_PAGE_SIZE;
1041 }
1042 SetGuardForMemory (Address, NumberOfPages);
1043 return Address;
1044 }
1045 }
1046
1047 return (UINTN)(-1);
1048 }
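//
// Accounting sketch (illustrative): when neither an existing head nor tail
// Guard can be reused, PagesToAlloc = NumberOfPages + 2; the first and last
// pages of the carved-out range become the new head and tail Guards, and the
// returned Address is advanced past the head Guard so the caller only ever
// sees NumberOfPages usable pages.
//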
1049
1050 /**
1051 Helper function of memory free with Guard pages.
1052
1053 @param[in] Memory Base address of memory being freed.
1054 @param[in] NumberOfPages The number of pages to free.
1055 @param[in] AddRegion Whether this memory is a newly added region.
1056 
1057 @retval EFI_NOT_FOUND Could not find the entry that covers the range.
1058 @retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or NumberOfPages is zero.
1059 @retval EFI_SUCCESS Pages successfully freed.
1060 **/
1061 EFI_STATUS
1062 SmmInternalFreePagesExWithGuard (
1063 IN EFI_PHYSICAL_ADDRESS Memory,
1064 IN UINTN NumberOfPages,
1065 IN BOOLEAN AddRegion
1066 )
1067 {
1068 EFI_PHYSICAL_ADDRESS MemoryToFree;
1069 UINTN PagesToFree;
1070
1071 if (((Memory & EFI_PAGE_MASK) != 0) || (Memory == 0) || (NumberOfPages == 0)) {
1072 return EFI_INVALID_PARAMETER;
1073 }
1074
1075 MemoryToFree = Memory;
1076 PagesToFree = NumberOfPages;
1077
1078 AdjustMemoryF (&MemoryToFree, &PagesToFree);
1079 UnsetGuardForMemory (Memory, NumberOfPages);
1080 if (PagesToFree == 0) {
1081 return EFI_SUCCESS;
1082 }
1083
1084 return SmmInternalFreePagesEx (MemoryToFree, PagesToFree, AddRegion);
1085 }
1086
1087 /**
1088 Set all Guard pages which could not be set while outside of SMM mode.
1089 **/
1090 VOID
1091 SetAllGuardPages (
1092 VOID
1093 )
1094 {
1095 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1096 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1097 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1098 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1099 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1100 UINT64 TableEntry;
1101 UINT64 Address;
1102 UINT64 GuardPage;
1103 INTN Level;
1104 UINTN Index;
1105 BOOLEAN OnGuarding;
1106
1107 if (mGuardedMemoryMap == 0 ||
1108 mMapLevel == 0 ||
1109 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1110 return;
1111 }
1112
1113 CopyMem (Entries, mLevelMask, sizeof (Entries));
1114 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1115
1116 SetMem (Tables, sizeof(Tables), 0);
1117 SetMem (Addresses, sizeof(Addresses), 0);
1118 SetMem (Indices, sizeof(Indices), 0);
1119
1120 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1121 Tables[Level] = mGuardedMemoryMap;
1122 Address = 0;
1123 OnGuarding = FALSE;
1124
1125 DEBUG_CODE (
1126 DumpGuardedMemoryBitmap ();
1127 );
1128
1129 while (TRUE) {
1130 if (Indices[Level] > Entries[Level]) {
1131 Tables[Level] = 0;
1132 Level -= 1;
1133 } else {
1134
1135 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
1136 Address = Addresses[Level];
1137
1138 if (TableEntry == 0) {
1139
1140 OnGuarding = FALSE;
1141
1142 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1143
1144 Level += 1;
1145 Tables[Level] = TableEntry;
1146 Addresses[Level] = Address;
1147 Indices[Level] = 0;
1148
1149 continue;
1150
1151 } else {
1152
1153 Index = 0;
1154 while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
1155 if ((TableEntry & 1) == 1) {
1156 if (OnGuarding) {
1157 GuardPage = 0;
1158 } else {
1159 GuardPage = Address - EFI_PAGE_SIZE;
1160 }
1161 OnGuarding = TRUE;
1162 } else {
1163 if (OnGuarding) {
1164 GuardPage = Address;
1165 } else {
1166 GuardPage = 0;
1167 }
1168 OnGuarding = FALSE;
1169 }
1170
1171 if (GuardPage != 0) {
1172 SetGuardPage (GuardPage);
1173 }
1174
1175 if (TableEntry == 0) {
1176 break;
1177 }
1178
1179 TableEntry = RShiftU64 (TableEntry, 1);
1180 Address += EFI_PAGE_SIZE;
1181 Index += 1;
1182 }
1183 }
1184 }
1185
1186 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1187 break;
1188 }
1189
1190 Indices[Level] += 1;
1191 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1192 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1193
1194 }
1195 }
1196
1197 /**
1198 Hook function used to set all Guard pages after entering SMM mode.
1199 **/
1200 VOID
1201 SmmEntryPointMemoryManagementHook (
1202 VOID
1203 )
1204 {
1205 EFI_STATUS Status;
1206
1207 if (mSmmMemoryAttribute == NULL) {
1208 Status = SmmLocateProtocol (
1209 &gEdkiiSmmMemoryAttributeProtocolGuid,
1210 NULL,
1211 (VOID **)&mSmmMemoryAttribute
1212 );
1213 if (!EFI_ERROR(Status)) {
1214 SetAllGuardPages ();
1215 }
1216 }
1217 }
1218
1219 /**
1220 Helper function to convert a UINT64 value to its binary string representation.
1221
1222 @param[in] Value Value of a UINT64 integer.
1223 @param[out] BinString String buffer to contain the conversion result; it must hold at least 65 characters (64 digits plus the terminating null).
1224
1225 @return VOID.
1226 **/
1227 VOID
1228 Uint64ToBinString (
1229 IN UINT64 Value,
1230 OUT CHAR8 *BinString
1231 )
1232 {
1233 UINTN Index;
1234
1235 if (BinString == NULL) {
1236 return;
1237 }
1238
1239 for (Index = 64; Index > 0; --Index) {
1240 BinString[Index - 1] = '0' + (Value & 1);
1241 Value = RShiftU64 (Value, 1);
1242 }
1243 BinString[64] = '\0';
1244 }
1245
1246 /**
1247 Dump the guarded memory bit map.
1248 **/
1249 VOID
1250 EFIAPI
1251 DumpGuardedMemoryBitmap (
1252 VOID
1253 )
1254 {
1255 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
1256 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
1257 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
1258 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
1259 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
1260 UINT64 TableEntry;
1261 UINT64 Address;
1262 INTN Level;
1263 UINTN RepeatZero;
1264 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
1265 CHAR8 *Ruler1;
1266 CHAR8 *Ruler2;
1267
1268 if (mGuardedMemoryMap == 0 ||
1269 mMapLevel == 0 ||
1270 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
1271 return;
1272 }
1273
1274 Ruler1 = " 3 2 1 0";
1275 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";
1276
1277 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
1278 " Guarded Memory Bitmap "
1279 "==============================\r\n"));
1280 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
1281 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));
1282
1283 CopyMem (Entries, mLevelMask, sizeof (Entries));
1284 CopyMem (Shifts, mLevelShift, sizeof (Shifts));
1285
1286 SetMem (Indices, sizeof(Indices), 0);
1287 SetMem (Tables, sizeof(Tables), 0);
1288 SetMem (Addresses, sizeof(Addresses), 0);
1289
1290 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
1291 Tables[Level] = mGuardedMemoryMap;
1292 Address = 0;
1293 RepeatZero = 0;
1294
1295 while (TRUE) {
1296 if (Indices[Level] > Entries[Level]) {
1297
1298 Tables[Level] = 0;
1299 Level -= 1;
1300 RepeatZero = 0;
1301
1302 DEBUG ((
1303 HEAP_GUARD_DEBUG_LEVEL,
1304 "========================================="
1305 "=========================================\r\n"
1306 ));
1307
1308 } else {
1309
1310 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
1311 Address = Addresses[Level];
1312
1313 if (TableEntry == 0) {
1314
1315 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1316 if (RepeatZero == 0) {
1317 Uint64ToBinString(TableEntry, String);
1318 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1319 } else if (RepeatZero == 1) {
1320 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
1321 }
1322 RepeatZero += 1;
1323 }
1324
1325 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
1326
1327 Level += 1;
1328 Tables[Level] = TableEntry;
1329 Addresses[Level] = Address;
1330 Indices[Level] = 0;
1331 RepeatZero = 0;
1332
1333 continue;
1334
1335 } else {
1336
1337 RepeatZero = 0;
1338 Uint64ToBinString(TableEntry, String);
1339 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
1340
1341 }
1342 }
1343
1344 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
1345 break;
1346 }
1347
1348 Indices[Level] += 1;
1349 Address = (Level == 0) ? 0 : Addresses[Level - 1];
1350 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);
1351
1352 }
1353 }
1354
1355 /**
1356 Debug function used to verify whether the Guard pages are properly set or not.
1357
1358 @param[in] BaseAddress Address of memory to check.
1359 @param[in] NumberOfPages Size of memory in pages.
1360
1361 @return TRUE The head Guard and tail Guard are both well set.
1362 @return FALSE The head Guard and/or tail Guard are not well set.
1363 **/
1364 BOOLEAN
1365 VerifyMemoryGuard (
1366 IN EFI_PHYSICAL_ADDRESS BaseAddress,
1367 IN UINTN NumberOfPages
1368 )
1369 {
1370 EFI_STATUS Status;
1371 UINT64 Attribute;
1372 EFI_PHYSICAL_ADDRESS Address;
1373
1374 if (mSmmMemoryAttribute == NULL) {
1375 return TRUE;
1376 }
1377
1378 Attribute = 0;
1379 Address = BaseAddress - EFI_PAGE_SIZE;
1380 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1381 mSmmMemoryAttribute,
1382 Address,
1383 EFI_PAGE_SIZE,
1384 &Attribute
1385 );
1386 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
1387 DEBUG ((DEBUG_ERROR, "Head Guard is not set at: %016lx (%016lX)!!!\r\n",
1388 Address, Attribute));
1389 DumpGuardedMemoryBitmap ();
1390 return FALSE;
1391 }
1392
1393 Attribute = 0;
1394 Address = BaseAddress + EFI_PAGES_TO_SIZE (NumberOfPages);
1395 Status = mSmmMemoryAttribute->GetMemoryAttributes (
1396 mSmmMemoryAttribute,
1397 Address,
1398 EFI_PAGE_SIZE,
1399 &Attribute
1400 );
1401 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {
1402 DEBUG ((DEBUG_ERROR, "Tail Guard is not set at: %016lx (%016lX)!!!\r\n",
1403 Address, Attribute));
1404 DumpGuardedMemoryBitmap ();
1405 return FALSE;
1406 }
1407
1408 return TRUE;
1409 }
1410