]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
MdeModulePkg/DxeCore: add sanity check for SetMemoryAttributes
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
1 /** @file
2 UEFI Heap Guard functions.
3
4 Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "DxeMain.h"
16 #include "Imem.h"
17 #include "HeapGuard.h"
18
//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocate pages for new PDE/PTE.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to table tracking the Guarded memory with bitmap, in which '1'
// is used to indicate memory guarded. '0' might be free memory or Guard
// page itself, depending on status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized at least by 1. It will be automatically
// updated according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table. Used to derive the table
// index of an address at a given level of the multi-level bitmap.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
46
47 /**
48 Set corresponding bits in bitmap table to 1 according to the address.
49
50 @param[in] Address Start address to set for.
51 @param[in] BitNumber Number of bits to set.
52 @param[in] BitMap Pointer to bitmap which covers the Address.
53
54 @return VOID.
55 **/
56 STATIC
57 VOID
58 SetBits (
59 IN EFI_PHYSICAL_ADDRESS Address,
60 IN UINTN BitNumber,
61 IN UINT64 *BitMap
62 )
63 {
64 UINTN Lsbs;
65 UINTN Qwords;
66 UINTN Msbs;
67 UINTN StartBit;
68 UINTN EndBit;
69
70 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
71 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
72
73 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
74 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
75 GUARDED_HEAP_MAP_ENTRY_BITS;
76 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
77 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
78 } else {
79 Msbs = BitNumber;
80 Lsbs = 0;
81 Qwords = 0;
82 }
83
84 if (Msbs > 0) {
85 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
86 BitMap += 1;
87 }
88
89 if (Qwords > 0) {
90 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
91 (UINT64)-1);
92 BitMap += Qwords;
93 }
94
95 if (Lsbs > 0) {
96 *BitMap |= (LShiftU64 (1, Lsbs) - 1);
97 }
98 }
99
100 /**
101 Set corresponding bits in bitmap table to 0 according to the address.
102
103 @param[in] Address Start address to set for.
104 @param[in] BitNumber Number of bits to set.
105 @param[in] BitMap Pointer to bitmap which covers the Address.
106
107 @return VOID.
108 **/
109 STATIC
110 VOID
111 ClearBits (
112 IN EFI_PHYSICAL_ADDRESS Address,
113 IN UINTN BitNumber,
114 IN UINT64 *BitMap
115 )
116 {
117 UINTN Lsbs;
118 UINTN Qwords;
119 UINTN Msbs;
120 UINTN StartBit;
121 UINTN EndBit;
122
123 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
124 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
125
126 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
127 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
128 GUARDED_HEAP_MAP_ENTRY_BITS;
129 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
130 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
131 } else {
132 Msbs = BitNumber;
133 Lsbs = 0;
134 Qwords = 0;
135 }
136
137 if (Msbs > 0) {
138 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
139 BitMap += 1;
140 }
141
142 if (Qwords > 0) {
143 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
144 BitMap += Qwords;
145 }
146
147 if (Lsbs > 0) {
148 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
149 }
150 }
151
152 /**
153 Get corresponding bits in bitmap table according to the address.
154
155 The value of bit 0 corresponds to the status of memory at given Address.
156 No more than 64 bits can be retrieved in one call.
157
158 @param[in] Address Start address to retrieve bits for.
159 @param[in] BitNumber Number of bits to get.
160 @param[in] BitMap Pointer to bitmap which covers the Address.
161
162 @return An integer containing the bits information.
163 **/
164 STATIC
165 UINT64
166 GetBits (
167 IN EFI_PHYSICAL_ADDRESS Address,
168 IN UINTN BitNumber,
169 IN UINT64 *BitMap
170 )
171 {
172 UINTN StartBit;
173 UINTN EndBit;
174 UINTN Lsbs;
175 UINTN Msbs;
176 UINT64 Result;
177
178 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);
179
180 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
181 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
182
183 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
184 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
185 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
186 } else {
187 Msbs = BitNumber;
188 Lsbs = 0;
189 }
190
191 if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
192 Result = *BitMap;
193 } else {
194 Result = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
195 if (Lsbs > 0) {
196 BitMap += 1;
197 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
198 }
199 }
200
201 return Result;
202 }
203
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  The bitmap is a multi-level table rooted at mGuardedMemoryMap. If
  AllocMapUnit is TRUE, the root is deepened (mMapLevel grows) until it can
  cover Address, and any missing intermediate/leaf tables along the path are
  allocated (unguarded, to avoid reentrance). If AllocMapUnit is FALSE and
  a table on the path does not exist, *BitMap is set to NULL.

  @param[in]  Address        Start address to search the bitmap for.
  @param[in]  AllocMapUnit   Flag to indicate memory allocation for the table.
  @param[out] BitMap         Pointer to bitmap which covers the Address.
                             May be NULL when AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN         Level;
  UINT64        *GuardMap;
  UINT64        MapMemory;
  UINTN         Index;
  UINTN         Size;
  UINTN         BitsToUnitEnd;
  EFI_STATUS    Status;

  //
  // Adjust current map table depth according to the address to access:
  // keep adding root levels while the address has bits above what the
  // current depth can index.
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      //
      // A deeper root table is needed. Allocate one (never guarded — the
      // last FALSE argument — to avoid reentrance) and chain the current
      // root below its first entry, which keeps existing tracking valid.
      //
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk the table path for Address from the root level down to the leaf,
  // allocating missing tables on demand (AllocMapUnit) or bailing out with
  // NULL (lookup-only mode).
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    //
    // Advance to the entry for Address at this level.
    //
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index    &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap = GuardMap;

  return BitsToUnitEnd;
}
299
300 /**
301 Set corresponding bits in bitmap table to 1 according to given memory range.
302
303 @param[in] Address Memory address to guard from.
304 @param[in] NumberOfPages Number of pages to guard.
305
306 @return VOID.
307 **/
308 VOID
309 EFIAPI
310 SetGuardedMemoryBits (
311 IN EFI_PHYSICAL_ADDRESS Address,
312 IN UINTN NumberOfPages
313 )
314 {
315 UINT64 *BitMap;
316 UINTN Bits;
317 UINTN BitsToUnitEnd;
318
319 while (NumberOfPages > 0) {
320 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
321 ASSERT (BitMap != NULL);
322
323 if (NumberOfPages > BitsToUnitEnd) {
324 // Cross map unit
325 Bits = BitsToUnitEnd;
326 } else {
327 Bits = NumberOfPages;
328 }
329
330 SetBits (Address, Bits, BitMap);
331
332 NumberOfPages -= Bits;
333 Address += EFI_PAGES_TO_SIZE (Bits);
334 }
335 }
336
337 /**
338 Clear corresponding bits in bitmap table according to given memory range.
339
340 @param[in] Address Memory address to unset from.
341 @param[in] NumberOfPages Number of pages to unset guard.
342
343 @return VOID.
344 **/
345 VOID
346 EFIAPI
347 ClearGuardedMemoryBits (
348 IN EFI_PHYSICAL_ADDRESS Address,
349 IN UINTN NumberOfPages
350 )
351 {
352 UINT64 *BitMap;
353 UINTN Bits;
354 UINTN BitsToUnitEnd;
355
356 while (NumberOfPages > 0) {
357 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
358 ASSERT (BitMap != NULL);
359
360 if (NumberOfPages > BitsToUnitEnd) {
361 // Cross map unit
362 Bits = BitsToUnitEnd;
363 } else {
364 Bits = NumberOfPages;
365 }
366
367 ClearBits (Address, Bits, BitMap);
368
369 NumberOfPages -= Bits;
370 Address += EFI_PAGES_TO_SIZE (Bits);
371 }
372 }
373
374 /**
375 Retrieve corresponding bits in bitmap table according to given memory range.
376
377 @param[in] Address Memory address to retrieve from.
378 @param[in] NumberOfPages Number of pages to retrieve.
379
380 @return An integer containing the guarded memory bitmap.
381 **/
382 UINTN
383 GetGuardedMemoryBits (
384 IN EFI_PHYSICAL_ADDRESS Address,
385 IN UINTN NumberOfPages
386 )
387 {
388 UINT64 *BitMap;
389 UINTN Bits;
390 UINTN Result;
391 UINTN Shift;
392 UINTN BitsToUnitEnd;
393
394 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);
395
396 Result = 0;
397 Shift = 0;
398 while (NumberOfPages > 0) {
399 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);
400
401 if (NumberOfPages > BitsToUnitEnd) {
402 // Cross map unit
403 Bits = BitsToUnitEnd;
404 } else {
405 Bits = NumberOfPages;
406 }
407
408 if (BitMap != NULL) {
409 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
410 }
411
412 Shift += Bits;
413 NumberOfPages -= Bits;
414 Address += EFI_PAGES_TO_SIZE (Bits);
415 }
416
417 return Result;
418 }
419
420 /**
421 Get bit value in bitmap table for the given address.
422
423 @param[in] Address The address to retrieve for.
424
425 @return 1 or 0.
426 **/
427 UINTN
428 EFIAPI
429 GetGuardMapBit (
430 IN EFI_PHYSICAL_ADDRESS Address
431 )
432 {
433 UINT64 *GuardMap;
434
435 FindGuardedMemoryMap (Address, FALSE, &GuardMap);
436 if (GuardMap != NULL) {
437 if (RShiftU64 (*GuardMap,
438 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
439 return 1;
440 }
441 }
442
443 return 0;
444 }
445
446 /**
447 Set the bit in bitmap table for the given address.
448
449 @param[in] Address The address to set for.
450
451 @return VOID.
452 **/
453 VOID
454 EFIAPI
455 SetGuardMapBit (
456 IN EFI_PHYSICAL_ADDRESS Address
457 )
458 {
459 UINT64 *GuardMap;
460 UINT64 BitMask;
461
462 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
463 if (GuardMap != NULL) {
464 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
465 *GuardMap |= BitMask;
466 }
467 }
468
469 /**
470 Clear the bit in bitmap table for the given address.
471
472 @param[in] Address The address to clear for.
473
474 @return VOID.
475 **/
476 VOID
477 EFIAPI
478 ClearGuardMapBit (
479 IN EFI_PHYSICAL_ADDRESS Address
480 )
481 {
482 UINT64 *GuardMap;
483 UINT64 BitMask;
484
485 FindGuardedMemoryMap (Address, TRUE, &GuardMap);
486 if (GuardMap != NULL) {
487 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));
488 *GuardMap &= ~BitMask;
489 }
490 }
491
492 /**
493 Check to see if the page at the given address is a Guard page or not.
494
495 @param[in] Address The address to check for.
496
497 @return TRUE The page at Address is a Guard page.
498 @return FALSE The page at Address is not a Guard page.
499 **/
500 BOOLEAN
501 EFIAPI
502 IsGuardPage (
503 IN EFI_PHYSICAL_ADDRESS Address
504 )
505 {
506 UINTN BitMap;
507
508 //
509 // There must be at least one guarded page before and/or after given
510 // address if it's a Guard page. The bitmap pattern should be one of
511 // 001, 100 and 101
512 //
513 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
514 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
515 }
516
517 /**
518 Check to see if the page at the given address is a head Guard page or not.
519
520 @param[in] Address The address to check for
521
522 @return TRUE The page at Address is a head Guard page
523 @return FALSE The page at Address is not a head Guard page
524 **/
525 BOOLEAN
526 EFIAPI
527 IsHeadGuard (
528 IN EFI_PHYSICAL_ADDRESS Address
529 )
530 {
531 return (GetGuardedMemoryBits (Address, 2) == BIT1);
532 }
533
534 /**
535 Check to see if the page at the given address is a tail Guard page or not.
536
537 @param[in] Address The address to check for.
538
539 @return TRUE The page at Address is a tail Guard page.
540 @return FALSE The page at Address is not a tail Guard page.
541 **/
542 BOOLEAN
543 EFIAPI
544 IsTailGuard (
545 IN EFI_PHYSICAL_ADDRESS Address
546 )
547 {
548 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);
549 }
550
551 /**
552 Check to see if the page at the given address is guarded or not.
553
554 @param[in] Address The address to check for.
555
556 @return TRUE The page at Address is guarded.
557 @return FALSE The page at Address is not guarded.
558 **/
559 BOOLEAN
560 EFIAPI
561 IsMemoryGuarded (
562 IN EFI_PHYSICAL_ADDRESS Address
563 )
564 {
565 return (GetGuardMapBit (Address) == 1);
566 }
567
568 /**
569 Set the page at the given address to be a Guard page.
570
571 This is done by changing the page table attribute to be NOT PRSENT.
572
573 @param[in] BaseAddress Page address to Guard at
574
575 @return VOID
576 **/
577 VOID
578 EFIAPI
579 SetGuardPage (
580 IN EFI_PHYSICAL_ADDRESS BaseAddress
581 )
582 {
583 EFI_STATUS Status;
584
585 if (gCpu == NULL) {
586 return;
587 }
588
589 //
590 // Set flag to make sure allocating memory without GUARD for page table
591 // operation; otherwise infinite loops could be caused.
592 //
593 mOnGuarding = TRUE;
594 //
595 // Note: This might overwrite other attributes needed by other features,
596 // such as NX memory protection.
597 //
598 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
599 ASSERT_EFI_ERROR (Status);
600 mOnGuarding = FALSE;
601 }
602
603 /**
604 Unset the Guard page at the given address to the normal memory.
605
606 This is done by changing the page table attribute to be PRSENT.
607
608 @param[in] BaseAddress Page address to Guard at.
609
610 @return VOID.
611 **/
612 VOID
613 EFIAPI
614 UnsetGuardPage (
615 IN EFI_PHYSICAL_ADDRESS BaseAddress
616 )
617 {
618 UINT64 Attributes;
619 EFI_STATUS Status;
620
621 if (gCpu == NULL) {
622 return;
623 }
624
625 //
626 // Once the Guard page is unset, it will be freed back to memory pool. NX
627 // memory protection must be restored for this page if NX is enabled for free
628 // memory.
629 //
630 Attributes = 0;
631 if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
632 Attributes |= EFI_MEMORY_XP;
633 }
634
635 //
636 // Set flag to make sure allocating memory without GUARD for page table
637 // operation; otherwise infinite loops could be caused.
638 //
639 mOnGuarding = TRUE;
640 //
641 // Note: This might overwrite other attributes needed by other features,
642 // such as memory protection (NX). Please make sure they are not enabled
643 // at the same time.
644 //
645 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
646 ASSERT_EFI_ERROR (Status);
647 mOnGuarding = FALSE;
648 }
649
650 /**
651 Check to see if the memory at the given address should be guarded or not.
652
653 @param[in] MemoryType Memory type to check.
654 @param[in] AllocateType Allocation type to check.
655 @param[in] PageOrPool Indicate a page allocation or pool allocation.
656
657
658 @return TRUE The given type of memory should be guarded.
659 @return FALSE The given type of memory should not be guarded.
660 **/
661 BOOLEAN
662 IsMemoryTypeToGuard (
663 IN EFI_MEMORY_TYPE MemoryType,
664 IN EFI_ALLOCATE_TYPE AllocateType,
665 IN UINT8 PageOrPool
666 )
667 {
668 UINT64 TestBit;
669 UINT64 ConfigBit;
670 BOOLEAN InSmm;
671
672 if (AllocateType == AllocateAddress) {
673 return FALSE;
674 }
675
676 InSmm = FALSE;
677 if (gSmmBase2 != NULL) {
678 gSmmBase2->InSmm (gSmmBase2, &InSmm);
679 }
680
681 if (InSmm) {
682 return FALSE;
683 }
684
685 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
686 return FALSE;
687 }
688
689 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
690 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
691 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
692 ConfigBit = PcdGet64 (PcdHeapGuardPageType);
693 } else {
694 ConfigBit = (UINT64)-1;
695 }
696
697 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
698 TestBit = BIT63;
699 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
700 TestBit = BIT62;
701 } else if (MemoryType < EfiMaxMemoryType) {
702 TestBit = LShiftU64 (1, MemoryType);
703 } else if (MemoryType == EfiMaxMemoryType) {
704 TestBit = (UINT64)-1;
705 } else {
706 TestBit = 0;
707 }
708
709 return ((ConfigBit & TestBit) != 0);
710 }
711
712 /**
713 Check to see if the pool at the given address should be guarded or not.
714
715 @param[in] MemoryType Pool type to check.
716
717
718 @return TRUE The given type of pool should be guarded.
719 @return FALSE The given type of pool should not be guarded.
720 **/
721 BOOLEAN
722 IsPoolTypeToGuard (
723 IN EFI_MEMORY_TYPE MemoryType
724 )
725 {
726 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
727 GUARD_HEAP_TYPE_POOL);
728 }
729
730 /**
731 Check to see if the page at the given address should be guarded or not.
732
733 @param[in] MemoryType Page type to check.
734 @param[in] AllocateType Allocation type to check.
735
736 @return TRUE The given type of page should be guarded.
737 @return FALSE The given type of page should not be guarded.
738 **/
739 BOOLEAN
740 IsPageTypeToGuard (
741 IN EFI_MEMORY_TYPE MemoryType,
742 IN EFI_ALLOCATE_TYPE AllocateType
743 )
744 {
745 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
746 }
747
748 /**
749 Check to see if the heap guard is enabled for page and/or pool allocation.
750
751 @return TRUE/FALSE.
752 **/
753 BOOLEAN
754 IsHeapGuardEnabled (
755 VOID
756 )
757 {
758 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,
759 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);
760 }
761
762 /**
763 Set head Guard and tail Guard for the given memory range.
764
765 @param[in] Memory Base address of memory to set guard for.
766 @param[in] NumberOfPages Memory size in pages.
767
768 @return VOID
769 **/
770 VOID
771 SetGuardForMemory (
772 IN EFI_PHYSICAL_ADDRESS Memory,
773 IN UINTN NumberOfPages
774 )
775 {
776 EFI_PHYSICAL_ADDRESS GuardPage;
777
778 //
779 // Set tail Guard
780 //
781 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
782 if (!IsGuardPage (GuardPage)) {
783 SetGuardPage (GuardPage);
784 }
785
786 // Set head Guard
787 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
788 if (!IsGuardPage (GuardPage)) {
789 SetGuardPage (GuardPage);
790 }
791
792 //
793 // Mark the memory range as Guarded
794 //
795 SetGuardedMemoryBits (Memory, NumberOfPages);
796 }
797
798 /**
799 Unset head Guard and tail Guard for the given memory range.
800
801 @param[in] Memory Base address of memory to unset guard for.
802 @param[in] NumberOfPages Memory size in pages.
803
804 @return VOID
805 **/
806 VOID
807 UnsetGuardForMemory (
808 IN EFI_PHYSICAL_ADDRESS Memory,
809 IN UINTN NumberOfPages
810 )
811 {
812 EFI_PHYSICAL_ADDRESS GuardPage;
813 UINT64 GuardBitmap;
814
815 if (NumberOfPages == 0) {
816 return;
817 }
818
819 //
820 // Head Guard must be one page before, if any.
821 //
822 // MSB-> 1 0 <-LSB
823 // -------------------
824 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)
825 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)
826 // 1 X -> Don't free first page (need a new Guard)
827 // (it'll be turned into a Guard page later)
828 // -------------------
829 // Start -> -1 -2
830 //
831 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
832 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
833 if ((GuardBitmap & BIT1) == 0) {
834 //
835 // Head Guard exists.
836 //
837 if ((GuardBitmap & BIT0) == 0) {
838 //
839 // If the head Guard is not a tail Guard of adjacent memory block,
840 // unset it.
841 //
842 UnsetGuardPage (GuardPage);
843 }
844 } else {
845 //
846 // Pages before memory to free are still in Guard. It's a partial free
847 // case. Turn first page of memory block to free into a new Guard.
848 //
849 SetGuardPage (Memory);
850 }
851
852 //
853 // Tail Guard must be the page after this memory block to free, if any.
854 //
855 // MSB-> 1 0 <-LSB
856 // --------------------
857 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)
858 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
859 // X 1 -> Don't free last page (need a new Guard)
860 // (it'll be turned into a Guard page later)
861 // --------------------
862 // +1 +0 <- End
863 //
864 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
865 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
866 if ((GuardBitmap & BIT0) == 0) {
867 //
868 // Tail Guard exists.
869 //
870 if ((GuardBitmap & BIT1) == 0) {
871 //
872 // If the tail Guard is not a head Guard of adjacent memory block,
873 // free it; otherwise, keep it.
874 //
875 UnsetGuardPage (GuardPage);
876 }
877 } else {
878 //
879 // Pages after memory to free are still in Guard. It's a partial free
880 // case. We need to keep one page to be a head Guard.
881 //
882 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
883 }
884
885 //
886 // No matter what, we just clear the mark of the Guarded memory.
887 //
888 ClearGuardedMemoryBits(Memory, NumberOfPages);
889 }
890
891 /**
892 Adjust address of free memory according to existing and/or required Guard.
893
894 This function will check if there're existing Guard pages of adjacent
895 memory blocks, and try to use it as the Guard page of the memory to be
896 allocated.
897
898 @param[in] Start Start address of free memory block.
899 @param[in] Size Size of free memory block.
900 @param[in] SizeRequested Size of memory to allocate.
901
902 @return The end address of memory block found.
903 @return 0 if no enough space for the required size of memory and its Guard.
904 **/
905 UINT64
906 AdjustMemoryS (
907 IN UINT64 Start,
908 IN UINT64 Size,
909 IN UINT64 SizeRequested
910 )
911 {
912 UINT64 Target;
913
914 //
915 // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
916 // indicated to put the pool near the Tail Guard, we need extra bytes to
917 // make sure alignment of the returned pool address.
918 //
919 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
920 SizeRequested = ALIGN_VALUE(SizeRequested, 8);
921 }
922
923 Target = Start + Size - SizeRequested;
924 ASSERT (Target >= Start);
925 if (Target == 0) {
926 return 0;
927 }
928
929 if (!IsGuardPage (Start + Size)) {
930 // No Guard at tail to share. One more page is needed.
931 Target -= EFI_PAGES_TO_SIZE (1);
932 }
933
934 // Out of range?
935 if (Target < Start) {
936 return 0;
937 }
938
939 // At the edge?
940 if (Target == Start) {
941 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
942 // No enough space for a new head Guard if no Guard at head to share.
943 return 0;
944 }
945 }
946
947 // OK, we have enough pages for memory and its Guards. Return the End of the
948 // free space.
949 return Target + SizeRequested - 1;
950 }
951
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  Both parameters are updated in place; *NumberOfPages may end up 0 when
  the whole request is absorbed by Guard-page bookkeeping.

  @param[in,out] Memory          Base address of memory to free.
  @param[in,out] NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  // MSB-> 1     0 <-LSB
  // -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  // -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory = Start;
  *NumberOfPages = PagesToFree;
}
1053
1054 /**
1055 Adjust the base and number of pages to really allocate according to Guard.
1056
1057 @param[in,out] Memory Base address of free memory.
1058 @param[in,out] NumberOfPages Size of memory to allocate.
1059
1060 @return VOID.
1061 **/
1062 VOID
1063 AdjustMemoryA (
1064 IN OUT EFI_PHYSICAL_ADDRESS *Memory,
1065 IN OUT UINTN *NumberOfPages
1066 )
1067 {
1068 //
1069 // FindFreePages() has already taken the Guard into account. It's safe to
1070 // adjust the start address and/or number of pages here, to make sure that
1071 // the Guards are also "allocated".
1072 //
1073 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
1074 // No tail Guard, add one.
1075 *NumberOfPages += 1;
1076 }
1077
1078 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
1079 // No head Guard, add one.
1080 *Memory -= EFI_PAGE_SIZE;
1081 *NumberOfPages += 1;
1082 }
1083 }
1084
1085 /**
1086 Adjust the pool head position to make sure the Guard page is adjavent to
1087 pool tail or pool head.
1088
1089 @param[in] Memory Base address of memory allocated.
1090 @param[in] NoPages Number of pages actually allocated.
1091 @param[in] Size Size of memory requested.
1092 (plus pool head/tail overhead)
1093
1094 @return Address of pool head.
1095 **/
1096 VOID *
1097 AdjustPoolHeadA (
1098 IN EFI_PHYSICAL_ADDRESS Memory,
1099 IN UINTN NoPages,
1100 IN UINTN Size
1101 )
1102 {
1103 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1104 //
1105 // Pool head is put near the head Guard
1106 //
1107 return (VOID *)(UINTN)Memory;
1108 }
1109
1110 //
1111 // Pool head is put near the tail Guard
1112 //
1113 Size = ALIGN_VALUE (Size, 8);
1114 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
1115 }
1116
1117 /**
1118 Get the page base address according to pool head address.
1119
1120 @param[in] Memory Head address of pool to free.
1121
1122 @return Address of pool head.
1123 **/
1124 VOID *
1125 AdjustPoolHeadF (
1126 IN EFI_PHYSICAL_ADDRESS Memory
1127 )
1128 {
1129 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
1130 //
1131 // Pool head is put near the head Guard
1132 //
1133 return (VOID *)(UINTN)Memory;
1134 }
1135
1136 //
1137 // Pool head is put near the tail Guard
1138 //
1139 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
1140 }
1141
/**
  Allocate or free guarded memory.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @retval EFI_SUCCESS   The whole range was consumed by the Guard-page
                        adjustment and nothing was left to convert.
  @return The status of CoreConvertPages() on the adjusted range.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    //
    // Freeing: remember the original range, then adjust it so shared
    // Guard pages are kept and unshared ones are released with it.
    //
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      return EFI_SUCCESS;
    }
  } else {
    //
    // Allocating: extend the range so the Guard pages are allocated too.
    //
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1183
/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.

  Performs an iterative (stack-less) depth-first walk over the multi-level
  guard bitmap, using per-level arrays as an explicit traversal stack, and
  applies SetGuardPage() at every 0->1 / 1->0 transition of the guarded
  bits (i.e. at every Guard page bordering a guarded run).
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     Index;
  BOOLEAN   OnGuarding;

  //
  // Nothing to do when no memory has been tracked yet or the map depth is
  // outside its valid range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Snapshot the per-level masks/shifts; Entries[] holds the max index of
  // each level's table (mask value), so the inclusive bound below is
  // "Indices[Level] > Entries[Level]".
  //
  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // Start the walk at the current root table of the map.
  //
  Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address = 0;
  OnGuarding = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back up to the parent level.
      //
      Tables[Level] = 0;
      Level -= 1;
    } else {

      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address = Addresses[Level];

      if (TableEntry == 0) {
        //
        // Empty subtree: no guarded pages below this entry.
        //
        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Intermediate table entry: descend one level into it.
        //
        Level += 1;
        Tables[Level] = TableEntry;
        Addresses[Level] = Address;
        Indices[Level] = 0;

        continue;

      } else {
        //
        // Leaf bitmap entry: scan it bit by bit. Entering a guarded run
        // marks the preceding page as a Guard; leaving a run marks the
        // current page as a Guard.
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            //
            // No set bits remain in this entry; skip the rest of it.
            //
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address += EFI_PAGE_SIZE;
          Index += 1;
        }
      }
    }

    //
    // Stop once the walk has popped above the root level.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at this level and recompute the base
    // address covered by it.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1293
/**
  Notify function used to set all Guard pages before CPU Arch Protocol installed.

  Guard page attributes can only be applied once gCpu is available; this
  callback applies the whole recorded bitmap in one pass.
**/
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  )
{
  //
  // The CPU Arch Protocol must be installed by the time this notification
  // fires; SetAllGuardPages() depends on it to change page attributes.
  //
  ASSERT (gCpu != NULL);
  SetAllGuardPages ();
}
1305
1306 /**
1307 Helper function to convert a UINT64 value in binary to a string.
1308
1309 @param[in] Value Value of a UINT64 integer.
1310 @param[out] BinString String buffer to contain the conversion result.
1311
1312 @return VOID.
1313 **/
1314 VOID
1315 Uint64ToBinString (
1316 IN UINT64 Value,
1317 OUT CHAR8 *BinString
1318 )
1319 {
1320 UINTN Index;
1321
1322 if (BinString == NULL) {
1323 return;
1324 }
1325
1326 for (Index = 64; Index > 0; --Index) {
1327 BinString[Index - 1] = '0' + (Value & 1);
1328 Value = RShiftU64 (Value, 1);
1329 }
1330 BinString[64] = '\0';
1331 }
1332
/**
  Dump the guarded memory bit map.

  Performs the same non-recursive multi-level traversal as SetAllGuardPages(),
  printing each leaf bitmap as a 64-character binary string prefixed with the
  address it covers. Consecutive all-zero leaf entries are collapsed into a
  single "..." line via RepeatZero.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  //
  // Nothing to dump if no memory was ever guarded or the recorded map depth
  // is outside its valid range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Column rulers: nibble index (hex digit position) within each printed
  // 64-bit row.
  //
  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  //
  // Entries[] holds the per-level index mask and Shifts[] the per-level
  // address shift, copied from the module globals.
  //
  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof(Indices), 0);
  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);

  //
  // Start at the topmost level actually in use.
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {

      //
      // Current table exhausted; pop back to the parent level and print a
      // separator.
      //
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry  = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {

        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          //
          // Print the first zero row in full, a single "..." for the
          // second, and suppress any further consecutive zero rows.
          //
          if (RepeatZero == 0) {
            Uint64ToBinString(TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Non-leaf entry: descend into the child table.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;
        RepeatZero        = 0;

        continue;

      } else {

        //
        // Non-zero leaf bitmap: print it in binary.
        //
        RepeatZero = 0;
        Uint64ToBinString(TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    //
    // Popped above the starting level: the whole map has been walked.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at this level and recompute the base
    // address covered by it.
    //
    Indices[Level] += 1;
    Address         = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1441