]> git.proxmox.com Git - mirror_edk2.git/blame - MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
UefiCpuPkg: Move AsmRelocateApLoopStart from Mpfuncs.nasm to AmdSev.nasm
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
CommitLineData
e63da9f0
JW
1/** @file\r
2 UEFI Heap Guard functions.\r
3\r
8b13bca9 4Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>\r
9d510e61 5SPDX-License-Identifier: BSD-2-Clause-Patent\r
e63da9f0
JW
6\r
7**/\r
8\r
9#include "DxeMain.h"\r
10#include "Imem.h"\r
11#include "HeapGuard.h"\r
12\r
//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocate pages for new PDE/PTE.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN  mOnGuarding = FALSE;

//
// Pointer to table tracking the Guarded memory with bitmap, in which '1'
// is used to indicate memory guarded. '0' might be free memory or Guard
// page itself, depending on status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64  mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized at least by 1. It will be automatically
// updated according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN  mMapLevel = 1;

//
// Shift and mask for each level of map table (indexed from the topmost
// level down; see FindGuardedMemoryMap for how they are consumed).
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN  mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
  = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN  mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
  = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;

//
// Used for promoting freed but not used pages.
//
GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS  mLastPromotedPage = BASE_4GB;
e63da9f0
JW
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN  Lsbs;
  UINTN  Qwords;
  UINTN  Msbs;
  UINTN  StartBit;
  UINTN  EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // The run of bits may span several 64-bit map entries:
  //   Msbs   - bits in the first (possibly partial) entry,
  //   Qwords - number of whole entries in the middle,
  //   Lsbs   - bits spilling into the last (partial) entry.
  // '>=' (not '>') so an exactly-aligned full-entry run takes the
  // multi-entry path with Msbs == 0.
  //
  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
           GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs   = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs   = BitNumber;
    Lsbs   = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    // OR a mask of Msbs ones, shifted to StartBit, into the first entry.
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    // Whole entries are set to all-ones in one shot.
    SetMem64 (
      (VOID *)BitMap,
      Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
      (UINT64)-1
      );
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    // Remaining low bits of the final entry.
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
101\r
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN  Lsbs;
  UINTN  Qwords;
  UINTN  Msbs;
  UINTN  StartBit;
  UINTN  EndBit;

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // Mirror image of SetBits: split the run into a partial leading entry
  // (Msbs), whole middle entries (Qwords) and a partial trailing entry
  // (Lsbs). '>=' makes an aligned full-entry run take the Qwords path.
  //
  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
           GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs   = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs   = BitNumber;
    Lsbs   = 0;
    Qwords = 0;
  }

  if (Msbs > 0) {
    // AND with the inverted mask clears only the targeted bits.
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    // Whole entries are zeroed in one shot.
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
153\r
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address    Start address to retrieve bits for.
  @param[in]  BitNumber  Number of bits to get.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 BitNumber,
  IN UINT64                *BitMap
  )
{
  UINTN   StartBit;
  UINTN   EndBit;
  UINTN   Lsbs;
  UINTN   Msbs;
  UINT64  Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit   = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // A read spans at most two map entries (BitNumber <= 64): Msbs bits from
  // the first entry and Lsbs bits from the next one.
  //
  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  if ((StartBit == 0) && (BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS)) {
    //
    // Special case: a full aligned entry; the mask expression below would
    // need a 64-bit shift (undefined), so read it directly.
    //
    Result = *BitMap;
  } else {
    Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
    if (Lsbs > 0) {
      // Splice in the low bits of the following entry above the Msbs bits.
      BitMap += 1;
      Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
205\r
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  Note: mutates the module globals mMapLevel and mGuardedMemoryMap when the
  table must grow (AllocMapUnit == TRUE) to cover a higher address.

  @param[in]  Address       Start address to search the bitmap for.
  @param[in]  AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out] BitMap        Pointer to bitmap which covers the Address.
                            Set to NULL if the map unit does not exist and
                            AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               AllocMapUnit,
  OUT UINT64                **BitMap
  )
{
  UINTN       Level;
  UINT64      *GuardMap;
  UINT64      MapMemory;
  UINTN       Index;
  UINTN       Size;
  UINTN       BitsToUnitEnd;
  EFI_STATUS  Status;

  MapMemory = 0;

  //
  // Adjust current map table depth according to the address to access:
  // while the address has bits above what the current depth can index,
  // push a new top-level table whose entry 0 points at the old root.
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0)
  {
    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      //
      // Internal allocator with NeedGuard == FALSE avoids re-entering the
      // guard logic while growing the guard tables themselves.
      //
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap           = MapMemory;
    }

    mMapLevel++;
  }

  //
  // Walk from the current root down to the leaf bitmap, allocating missing
  // intermediate tables on demand when AllocMapUnit is TRUE.
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level)
  {
    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size   = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    // Index into this level's table by the address bits for this level.
    Index    = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index   &= mLevelMask[Level];
    GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}
301\r
302/**\r
303 Set corresponding bits in bitmap table to 1 according to given memory range.\r
304\r
305 @param[in] Address Memory address to guard from.\r
306 @param[in] NumberOfPages Number of pages to guard.\r
307\r
308 @return VOID.\r
309**/\r
310VOID\r
311EFIAPI\r
312SetGuardedMemoryBits (\r
1436aea4
MK
313 IN EFI_PHYSICAL_ADDRESS Address,\r
314 IN UINTN NumberOfPages\r
e63da9f0
JW
315 )\r
316{\r
1436aea4
MK
317 UINT64 *BitMap;\r
318 UINTN Bits;\r
319 UINTN BitsToUnitEnd;\r
e63da9f0
JW
320\r
321 while (NumberOfPages > 0) {\r
322 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
323 ASSERT (BitMap != NULL);\r
324\r
325 if (NumberOfPages > BitsToUnitEnd) {\r
326 // Cross map unit\r
327 Bits = BitsToUnitEnd;\r
328 } else {\r
1436aea4 329 Bits = NumberOfPages;\r
e63da9f0
JW
330 }\r
331\r
332 SetBits (Address, Bits, BitMap);\r
333\r
334 NumberOfPages -= Bits;\r
335 Address += EFI_PAGES_TO_SIZE (Bits);\r
336 }\r
337}\r
338\r
339/**\r
340 Clear corresponding bits in bitmap table according to given memory range.\r
341\r
342 @param[in] Address Memory address to unset from.\r
343 @param[in] NumberOfPages Number of pages to unset guard.\r
344\r
345 @return VOID.\r
346**/\r
347VOID\r
348EFIAPI\r
349ClearGuardedMemoryBits (\r
1436aea4
MK
350 IN EFI_PHYSICAL_ADDRESS Address,\r
351 IN UINTN NumberOfPages\r
e63da9f0
JW
352 )\r
353{\r
1436aea4
MK
354 UINT64 *BitMap;\r
355 UINTN Bits;\r
356 UINTN BitsToUnitEnd;\r
e63da9f0
JW
357\r
358 while (NumberOfPages > 0) {\r
359 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
360 ASSERT (BitMap != NULL);\r
361\r
362 if (NumberOfPages > BitsToUnitEnd) {\r
363 // Cross map unit\r
364 Bits = BitsToUnitEnd;\r
365 } else {\r
1436aea4 366 Bits = NumberOfPages;\r
e63da9f0
JW
367 }\r
368\r
369 ClearBits (Address, Bits, BitMap);\r
370\r
371 NumberOfPages -= Bits;\r
372 Address += EFI_PAGES_TO_SIZE (Bits);\r
373 }\r
374}\r
375\r
/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to retrieve from.
  @param[in]  NumberOfPages  Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINT64
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 NumberOfPages
  )
{
  UINT64  *BitMap;
  UINTN   Bits;
  UINT64  Result;
  UINTN   Shift;
  UINTN   BitsToUnitEnd;

  // Result is a single UINT64, so at most 64 pages can be queried.
  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift  = 0;
  while (NumberOfPages > 0) {
    // AllocMapUnit == FALSE: never grow the table on a read.
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    // A missing map unit means no pages there are guarded; those bits of
    // Result simply stay zero.
    if (BitMap != NULL) {
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift         += Bits;
    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}
421\r
422/**\r
423 Get bit value in bitmap table for the given address.\r
424\r
425 @param[in] Address The address to retrieve for.\r
426\r
427 @return 1 or 0.\r
428**/\r
429UINTN\r
430EFIAPI\r
431GetGuardMapBit (\r
1436aea4 432 IN EFI_PHYSICAL_ADDRESS Address\r
e63da9f0
JW
433 )\r
434{\r
1436aea4 435 UINT64 *GuardMap;\r
e63da9f0
JW
436\r
437 FindGuardedMemoryMap (Address, FALSE, &GuardMap);\r
438 if (GuardMap != NULL) {\r
1436aea4
MK
439 if (RShiftU64 (\r
440 *GuardMap,\r
441 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)\r
442 ) & 1)\r
443 {\r
e63da9f0
JW
444 return 1;\r
445 }\r
446 }\r
447\r
448 return 0;\r
449}\r
450\r
e63da9f0
JW
451/**\r
452 Check to see if the page at the given address is a Guard page or not.\r
453\r
454 @param[in] Address The address to check for.\r
455\r
456 @return TRUE The page at Address is a Guard page.\r
457 @return FALSE The page at Address is not a Guard page.\r
458**/\r
459BOOLEAN\r
460EFIAPI\r
461IsGuardPage (\r
1436aea4 462 IN EFI_PHYSICAL_ADDRESS Address\r
e63da9f0
JW
463 )\r
464{\r
1436aea4 465 UINT64 BitMap;\r
e63da9f0
JW
466\r
467 //\r
468 // There must be at least one guarded page before and/or after given\r
469 // address if it's a Guard page. The bitmap pattern should be one of\r
470 // 001, 100 and 101\r
471 //\r
472 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);\r
473 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
474}\r
475\r
e63da9f0
JW
476/**\r
477 Check to see if the page at the given address is guarded or not.\r
478\r
479 @param[in] Address The address to check for.\r
480\r
481 @return TRUE The page at Address is guarded.\r
482 @return FALSE The page at Address is not guarded.\r
483**/\r
484BOOLEAN\r
485EFIAPI\r
486IsMemoryGuarded (\r
1436aea4 487 IN EFI_PHYSICAL_ADDRESS Address\r
e63da9f0
JW
488 )\r
489{\r
490 return (GetGuardMapBit (Address) == 1);\r
491}\r
492\r
/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT.

  @param[in]  BaseAddress  Page address to Guard at

  @return VOID
**/
VOID
EFIAPI
SetGuardPage (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress
  )
{
  EFI_STATUS  Status;

  //
  // CPU Arch Protocol not installed yet: the attribute cannot be applied
  // now. (Pages missed here are set later by SetAllGuardPages.)
  //
  if (gCpu == NULL) {
    return;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as NX memory protection.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}
527\r
/**
  Unset the Guard page at the given address to the normal memory.

  This is done by changing the page table attribute to be PRESENT.

  @param[in]  BaseAddress  Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress
  )
{
  UINT64      Attributes;
  EFI_STATUS  Status;

  // Without the CPU Arch Protocol no attribute change is possible.
  if (gCpu == NULL) {
    return;
  }

  //
  // Once the Guard page is unset, it will be freed back to memory pool. NX
  // memory protection must be restored for this page if NX is enabled for free
  // memory.
  //
  Attributes = 0;
  if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
    Attributes |= EFI_MEMORY_XP;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as memory protection (NX). Please make sure they are not enabled
  // at the same time.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}
574\r
/**
  Check to see if the memory at the given address should be guarded or not.

  @param[in]  MemoryType    Memory type to check.
  @param[in]  AllocateType  Allocation type to check.
  @param[in]  PageOrPool    Indicate a page allocation or pool allocation.


  @return TRUE  The given type of memory should be guarded.
  @return FALSE The given type of memory should not be guarded.
**/
BOOLEAN
IsMemoryTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType,
  IN UINT8              PageOrPool
  )
{
  UINT64  TestBit;
  UINT64  ConfigBit;

  // Fixed-address allocations are never guarded.
  if (AllocateType == AllocateAddress) {
    return FALSE;
  }

  // Global enable mask: the requested guard sub-type must be switched on.
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
    return FALSE;
  }

  //
  // Pick the per-memory-type bitmask PCD for the requested guard kind;
  // any other value of PageOrPool matches all types.
  //
  if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
    ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
  } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
    ConfigBit = PcdGet64 (PcdHeapGuardPageType);
  } else {
    ConfigBit = (UINT64)-1;
  }

  //
  // Map MemoryType onto its config bit: BIT63 for OS-reserved types, BIT62
  // for OEM-reserved, 1 << MemoryType for standard types. EfiMaxMemoryType
  // is a wildcard matching any configured bit (used by IsHeapGuardEnabled).
  //
  if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
    TestBit = BIT63;
  } else if ((UINT32)MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
    TestBit = BIT62;
  } else if (MemoryType < EfiMaxMemoryType) {
    TestBit = LShiftU64 (1, MemoryType);
  } else if (MemoryType == EfiMaxMemoryType) {
    TestBit = (UINT64)-1;
  } else {
    TestBit = 0;
  }

  return ((ConfigBit & TestBit) != 0);
}
626\r
627/**\r
628 Check to see if the pool at the given address should be guarded or not.\r
629\r
630 @param[in] MemoryType Pool type to check.\r
631\r
632\r
633 @return TRUE The given type of pool should be guarded.\r
634 @return FALSE The given type of pool should not be guarded.\r
635**/\r
636BOOLEAN\r
637IsPoolTypeToGuard (\r
1436aea4 638 IN EFI_MEMORY_TYPE MemoryType\r
e63da9f0
JW
639 )\r
640{\r
1436aea4
MK
641 return IsMemoryTypeToGuard (\r
642 MemoryType,\r
643 AllocateAnyPages,\r
644 GUARD_HEAP_TYPE_POOL\r
645 );\r
e63da9f0
JW
646}\r
647\r
/**
  Check to see if the page at the given address should be guarded or not.

  @param[in]  MemoryType    Page type to check.
  @param[in]  AllocateType  Allocation type to check.

  @return TRUE  The given type of page should be guarded.
  @return FALSE The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType
  )
{
  // Thin wrapper over the common policy check for the page-guard sub-type.
  return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
}
665\r
a6a0a597
JW
/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @param[in]  GuardType  Specify the sub-type(s) of Heap Guard.

  @return TRUE/FALSE.
**/
BOOLEAN
IsHeapGuardEnabled (
  UINT8  GuardType
  )
{
  // EfiMaxMemoryType acts as a wildcard in IsMemoryTypeToGuard, so this
  // reports whether the given guard sub-type is enabled for any memory type.
  return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);
}
680\r
e63da9f0
JW
681/**\r
682 Set head Guard and tail Guard for the given memory range.\r
683\r
684 @param[in] Memory Base address of memory to set guard for.\r
685 @param[in] NumberOfPages Memory size in pages.\r
686\r
687 @return VOID\r
688**/\r
689VOID\r
690SetGuardForMemory (\r
1436aea4
MK
691 IN EFI_PHYSICAL_ADDRESS Memory,\r
692 IN UINTN NumberOfPages\r
e63da9f0
JW
693 )\r
694{\r
1436aea4 695 EFI_PHYSICAL_ADDRESS GuardPage;\r
e63da9f0
JW
696\r
697 //\r
698 // Set tail Guard\r
699 //\r
700 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
701 if (!IsGuardPage (GuardPage)) {\r
702 SetGuardPage (GuardPage);\r
703 }\r
704\r
705 // Set head Guard\r
706 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
707 if (!IsGuardPage (GuardPage)) {\r
708 SetGuardPage (GuardPage);\r
709 }\r
710\r
711 //\r
712 // Mark the memory range as Guarded\r
713 //\r
714 SetGuardedMemoryBits (Memory, NumberOfPages);\r
715}\r
716\r
/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory         Base address of memory to unset guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage   = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage   = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits (Memory, NumberOfPages);
}
809\r
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  @param[in]  Start          Start address of free memory block.
  @param[in]  Size           Size of free memory block.
  @param[in]  SizeRequested  Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64  Start,
  IN UINT64  Size,
  IN UINT64  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  // (BIT7 of PcdHeapGuardPropertyMask clear == pool placed at tail.)
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE (SizeRequested, 8);
  }

  // Tentatively place the allocation at the top of the free block.
  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // Not enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
870\r
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory         Base address of memory to free.
  @param[in,out]  NumberOfPages  Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if ((Memory == NULL) || (NumberOfPages == NULL) || (*NumberOfPages == 0)) {
    return;
  }

  Start       = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
972\r
973/**\r
974 Adjust the base and number of pages to really allocate according to Guard.\r
975\r
976 @param[in,out] Memory Base address of free memory.\r
977 @param[in,out] NumberOfPages Size of memory to allocate.\r
978\r
979 @return VOID.\r
980**/\r
981VOID\r
982AdjustMemoryA (\r
1436aea4
MK
983 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
984 IN OUT UINTN *NumberOfPages\r
e63da9f0
JW
985 )\r
986{\r
987 //\r
988 // FindFreePages() has already taken the Guard into account. It's safe to\r
989 // adjust the start address and/or number of pages here, to make sure that\r
990 // the Guards are also "allocated".\r
991 //\r
992 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {\r
993 // No tail Guard, add one.\r
994 *NumberOfPages += 1;\r
995 }\r
996\r
997 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {\r
998 // No head Guard, add one.\r
999 *Memory -= EFI_PAGE_SIZE;\r
1000 *NumberOfPages += 1;\r
1001 }\r
1002}\r
1003\r
1004/**\r
1005 Adjust the pool head position to make sure the Guard page is adjavent to\r
1006 pool tail or pool head.\r
1007\r
1008 @param[in] Memory Base address of memory allocated.\r
1009 @param[in] NoPages Number of pages actually allocated.\r
1010 @param[in] Size Size of memory requested.\r
1011 (plus pool head/tail overhead)\r
1012\r
1013 @return Address of pool head.\r
1014**/\r
1015VOID *\r
1016AdjustPoolHeadA (\r
1436aea4
MK
1017 IN EFI_PHYSICAL_ADDRESS Memory,\r
1018 IN UINTN NoPages,\r
1019 IN UINTN Size\r
e63da9f0
JW
1020 )\r
1021{\r
1436aea4 1022 if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {\r
e63da9f0
JW
1023 //\r
1024 // Pool head is put near the head Guard\r
1025 //\r
1026 return (VOID *)(UINTN)Memory;\r
1027 }\r
1028\r
1029 //\r
1030 // Pool head is put near the tail Guard\r
1031 //\r
c44218e5 1032 Size = ALIGN_VALUE (Size, 8);\r
e63da9f0
JW
1033 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);\r
1034}\r
1035\r
1036/**\r
1037 Get the page base address according to pool head address.\r
1038\r
1039 @param[in] Memory Head address of pool to free.\r
1040\r
1041 @return Address of pool head.\r
1042**/\r
1043VOID *\r
1044AdjustPoolHeadF (\r
1436aea4 1045 IN EFI_PHYSICAL_ADDRESS Memory\r
e63da9f0
JW
1046 )\r
1047{\r
1436aea4 1048 if ((Memory == 0) || ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0)) {\r
e63da9f0
JW
1049 //\r
1050 // Pool head is put near the head Guard\r
1051 //\r
1052 return (VOID *)(UINTN)Memory;\r
1053 }\r
1054\r
1055 //\r
1056 // Pool head is put near the tail Guard\r
1057 //\r
1058 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);\r
1059}\r
1060\r
/**
  Allocate or free guarded memory.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @return Status of CoreConvertPages() on the adjusted range, or EFI_SUCCESS
          directly if the whole range consisted of Guard pages and nothing
          is left to convert.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    //
    // Freeing: remember the original range, then let AdjustMemoryF() trim
    // it (the adjusted range can shrink to zero pages — see below).
    //
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      //
      // Nothing left after the adjustment; the range needs no conversion.
      //
      return EFI_SUCCESS;
    }
  } else {
    //
    // Allocating: expand the range so the Guard pages are "allocated" too.
    //
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1102\r
7fef06af
JW
/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN    Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN    Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN    Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64   TableEntry;
  UINT64   Address;
  UINT64   GuardPage;
  INTN     Level;
  UINTN    Index;
  BOOLEAN  OnGuarding;

  //
  // Nothing to do if the guarded-memory bitmap was never populated.
  //
  if ((mGuardedMemoryMap == 0) ||
      (mMapLevel == 0) ||
      (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
  {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  //
  // Start the depth-first walk at the shallowest level actually in use.
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
    );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back to the parent level.
      //
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        //
        // A whole empty leaf/subtree ends any run of guarded pages.
        //
        OnGuarding = FALSE;
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Intermediate level: descend into the child table.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;
      } else {
        //
        // Leaf level: one bit per page, '1' meaning guarded memory. Walk
        // the bits and set a Guard page at each edge of a run of 1-bits:
        // the page just below the run (head Guard) and the page just
        // after it (tail Guard).
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              //
              // Still inside a run; no new Guard page needed.
              //
              GuardPage = 0;
            } else {
              //
              // Run starts here: the previous page is its head Guard.
              //
              GuardPage = Address - EFI_PAGE_SIZE;
            }

            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              //
              // Run just ended: this page is its tail Guard.
              //
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }

            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            //
            // No more 1-bits left in this entry; skip the remainder.
            //
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address   += EFI_PAGE_SIZE;
          Index     += 1;
        }
      }
    }

    //
    // Walk finished once we pop above the shallowest level in use.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at this level and refresh its base address.
    //
    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}
1208\r
63ebde8e
JW
/**
  Find the address of top-most guarded free page.

  @param[out] Address  Start address of top-most guarded free page.

  @return VOID.
**/
VOID
GetLastGuardedFreePageAddress (
  OUT EFI_PHYSICAL_ADDRESS  *Address
  )
{
  EFI_PHYSICAL_ADDRESS  AddressGranularity;
  EFI_PHYSICAL_ADDRESS  BaseAddress;
  UINTN                 Level;
  UINT64                Map;
  INTN                  Index;

  ASSERT (mMapLevel >= 1);

  //
  // Descend the map from the shallowest level in use, always following the
  // highest-indexed non-empty entry, accumulating that entry's address
  // contribution at each level.
  //
  BaseAddress = 0;
  Map         = mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level)
  {
    //
    // Bytes of address space covered by one entry at this level.
    //
    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);

    //
    // Find the non-NULL entry at largest index.
    //
    for (Index = (INTN)mLevelMask[Level]; Index >= 0; --Index) {
      if (((UINT64 *)(UINTN)Map)[Index] != 0) {
        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
        Map          = ((UINT64 *)(UINTN)Map)[Index];
        break;
      }
    }
  }

  //
  // Find the non-zero MSB then get the page address. Map now holds the
  // highest non-empty leaf bitmap; one page is added per shift until it
  // empties.
  //
  while (Map != 0) {
    Map          = RShiftU64 (Map, 1);
    BaseAddress += EFI_PAGES_TO_SIZE (1);
  }

  *Address = BaseAddress;
}
1259\r
1260/**\r
1261 Record freed pages.\r
1262\r
1263 @param[in] BaseAddress Base address of just freed pages.\r
1264 @param[in] Pages Number of freed pages.\r
1265\r
1266 @return VOID.\r
1267**/\r
1268VOID\r
1269MarkFreedPages (\r
1436aea4
MK
1270 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
1271 IN UINTN Pages\r
63ebde8e
JW
1272 )\r
1273{\r
1274 SetGuardedMemoryBits (BaseAddress, Pages);\r
1275}\r
1276\r
/**
  Record freed pages as well as mark them as not-present.

  If the CPU Arch Protocol is not yet available (gCpu == NULL), the pages are
  only recorded in the bitmap; they are marked not-present later (see
  GuardAllFreedPages()).

  @param[in]  BaseAddress   Base address of just freed pages.
  @param[in]  Pages         Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPages (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINTN                 Pages
  )
{
  EFI_STATUS  Status;

  //
  // Legacy memory lower than 1MB might be accessed with no allocation. Leave
  // them alone.
  //
  if (BaseAddress < BASE_1MB) {
    return;
  }

  MarkFreedPages (BaseAddress, Pages);
  if (gCpu != NULL) {
    //
    // Set flag to make sure allocating memory without GUARD for page table
    // operation; otherwise infinite loops could be caused.
    //
    mOnGuarding = TRUE;
    //
    // Note: This might overwrite other attributes needed by other features,
    // such as NX memory protection.
    //
    Status = gCpu->SetMemoryAttributes (
                     gCpu,
                     BaseAddress,
                     EFI_PAGES_TO_SIZE (Pages),
                     EFI_MEMORY_RP
                     );
    //
    // Normally we should ASSERT the returned Status. But there might be memory
    // alloc/free involved in SetMemoryAttributes(), which might fail this
    // calling. It's rare case so it's OK to let a few tiny holes be not-guarded.
    //
    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));
    }

    mOnGuarding = FALSE;
  }
}
1331\r
1332/**\r
1333 Record freed pages as well as mark them as not-present, if enabled.\r
1334\r
1335 @param[in] BaseAddress Base address of just freed pages.\r
1336 @param[in] Pages Number of freed pages.\r
1337\r
1338 @return VOID.\r
1339**/\r
1340VOID\r
1341EFIAPI\r
1342GuardFreedPagesChecked (\r
1436aea4
MK
1343 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
1344 IN UINTN Pages\r
63ebde8e
JW
1345 )\r
1346{\r
1347 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
1348 GuardFreedPages (BaseAddress, Pages);\r
1349 }\r
1350}\r
1351\r
/**
  Mark all pages freed before CPU Arch Protocol as not-present.

  Walks the whole guarded-memory bitmap, collects consecutive set bits into
  runs and guards each run with a single GuardFreedPages() call.
**/
VOID
GuardAllFreedPages (
  VOID
  )
{
  UINTN   Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  TableEntry;
  UINT64  Address;
  UINT64  GuardPage;
  INTN    Level;
  UINT64  BitIndex;
  UINTN   GuardPageNumber;

  //
  // Nothing to do if the guarded-memory bitmap was never populated.
  //
  if ((mGuardedMemoryMap == 0) ||
      (mMapLevel == 0) ||
      (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
  {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  //
  // Start at the shallowest level in use. GuardPage == (UINT64)-1 means no
  // run of freed pages is currently open.
  //
  Level           = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level]   = mGuardedMemoryMap;
  Address         = 0;
  GuardPage       = (UINT64)-1;
  GuardPageNumber = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back to the parent level.
      //
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Intermediate level: descend into the child table.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;
      } else {
        //
        // Leaf level: one bit per page, '1' meaning freed-and-recorded.
        // Accumulate consecutive set bits into a run.
        //
        BitIndex = 1;
        while (BitIndex != 0) {
          if ((TableEntry & BitIndex) != 0) {
            if (GuardPage == (UINT64)-1) {
              //
              // Open a new run at this page.
              //
              GuardPage = Address;
            }

            ++GuardPageNumber;
          } else if (GuardPageNumber > 0) {
            //
            // Run ended; mark the collected pages not-present in one call.
            //
            GuardFreedPages (GuardPage, GuardPageNumber);
            GuardPageNumber = 0;
            GuardPage       = (UINT64)-1;
          }

          if (TableEntry == 0) {
            //
            // Empty (or fully drained) entry; nothing more to scan here.
            //
            break;
          }

          Address += EFI_PAGES_TO_SIZE (1);
          BitIndex = LShiftU64 (BitIndex, 1);
        }
      }
    }

    //
    // Walk finished once we pop above the shallowest level in use.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at this level and refresh its base address.
    //
    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }

  //
  // Update the maximum address of freed page which can be used for memory
  // promotion upon out-of-memory-space.
  //
  GetLastGuardedFreePageAddress (&Address);
  if (Address != 0) {
    mLastPromotedPage = Address;
  }
}
1451\r
/**
  This function checks to see if the given memory map descriptor in a memory map
  can be merged with any guarded free pages.

  @param  MemoryMapEntry    A pointer to a descriptor in MemoryMap.
  @param  MaxAddress        Maximum address to stop the merge.

  @return VOID

**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR  *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS   MaxAddress
  )
{
  EFI_PHYSICAL_ADDRESS  EndAddress;
  UINT64                Bitmap;
  INTN                  Pages;

  //
  // Only applicable when the freed-memory guard is on, and only for
  // conventional-memory-like descriptor types (below EfiMemoryMappedIO).
  //
  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
      (MemoryMapEntry->Type >= EfiMemoryMappedIO))
  {
    return;
  }

  //
  // Pages = number of pages between the descriptor's current end and
  // MaxAddress, i.e. the most this descriptor could possibly absorb.
  //
  Bitmap = 0;
  Pages  = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));
  Pages -= (INTN)MemoryMapEntry->NumberOfPages;
  while (Pages > 0) {
    if (Bitmap == 0) {
      //
      // Fetch the next 64-bit chunk of the guarded-memory bitmap, starting
      // at the descriptor's current end address.
      //
      EndAddress = MemoryMapEntry->PhysicalStart +
                   EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);
      Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    if ((Bitmap & 1) == 0) {
      //
      // Next page is not a guarded free page; stop merging.
      //
      break;
    }

    //
    // Absorb one guarded free page into the descriptor.
    //
    Pages--;
    MemoryMapEntry->NumberOfPages++;
    Bitmap = RShiftU64 (Bitmap, 1);
  }
}
1497\r
/**
  Put part (at most 64 pages a time) guarded free pages back to free page pool.

  Freed memory guard is used to detect Use-After-Free (UAF) memory issue, which
  makes use of 'Used then throw away' way to detect any illegal access to freed
  memory. The thrown-away memory will be marked as not-present so that any access
  to those memory (after free) will be caught by page-fault exception.

  The problem is that this will consume lots of memory space. Once no memory
  left in pool to allocate, we have to restore part of the freed pages to their
  normal function. Otherwise the whole system will stop functioning.

  @param  StartAddress    Start address of promoted memory.
  @param  EndAddress      End address of promoted memory.

  @return TRUE    Succeeded to promote memory.
  @return FALSE   No free memory found.

**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS  *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS  *EndAddress
  )
{
  EFI_STATUS            Status;
  UINTN                 AvailablePages;
  UINT64                Bitmap;
  EFI_PHYSICAL_ADDRESS  Start;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    return FALSE;
  }

  //
  // Similar to memory allocation service, always search the freed pages in
  // descending direction.
  //
  Start          = mLastPromotedPage;
  AvailablePages = 0;
  while (AvailablePages == 0) {
    //
    // Step down one 64-page bitmap chunk at a time.
    //
    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    //
    // If the address wraps around, try the really freed pages at top.
    //
    if (Start > mLastPromotedPage) {
      GetLastGuardedFreePageAddress (&Start);
      ASSERT (Start != 0);
      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    //
    // Scan this 64-page chunk: skip leading clear bits, then count the
    // first contiguous run of set (freed) bits starting at Start.
    //
    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
    while (Bitmap > 0) {
      if ((Bitmap & 1) != 0) {
        ++AvailablePages;
      } else if (AvailablePages == 0) {
        //
        // Still before the run; advance the start address past this page.
        //
        Start += EFI_PAGES_TO_SIZE (1);
      } else {
        //
        // Run ended.
        //
        break;
      }

      Bitmap = RShiftU64 (Bitmap, 1);
    }
  }

  if (AvailablePages != 0) {
    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
    //
    // Drop the freed-page records so these pages are no longer tracked.
    //
    ClearGuardedMemoryBits (Start, AvailablePages);

    if (gCpu != NULL) {
      //
      // Set flag to make sure allocating memory without GUARD for page table
      // operation; otherwise infinite loops could be caused.
      //
      mOnGuarding = TRUE;
      //
      // Clear all attributes (back to present/usable).
      //
      Status      = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE (AvailablePages), 0);
      ASSERT_EFI_ERROR (Status);
      mOnGuarding = FALSE;
    }

    mLastPromotedPage = Start;
    *StartAddress     = Start;
    *EndAddress       = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
    return TRUE;
  }

  return FALSE;
}
1586\r
7fef06af
JW
1587/**\r
1588 Notify function used to set all Guard pages before CPU Arch Protocol installed.\r
1589**/\r
1590VOID\r
1591HeapGuardCpuArchProtocolNotify (\r
1592 VOID\r
1593 )\r
1594{\r
1595 ASSERT (gCpu != NULL);\r
63ebde8e
JW
1596\r
1597 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&\r
1436aea4
MK
1598 IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED))\r
1599 {\r
63ebde8e
JW
1600 DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));\r
1601 CpuDeadLoop ();\r
1602 }\r
1603\r
1604 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {\r
1605 SetAllGuardPages ();\r
1606 }\r
1607\r
1608 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
1609 GuardAllFreedPages ();\r
1610 }\r
7fef06af
JW
1611}\r
1612\r
e63da9f0
JW
1613/**\r
1614 Helper function to convert a UINT64 value in binary to a string.\r
1615\r
1616 @param[in] Value Value of a UINT64 integer.\r
1617 @param[out] BinString String buffer to contain the conversion result.\r
1618\r
1619 @return VOID.\r
1620**/\r
1621VOID\r
1622Uint64ToBinString (\r
1436aea4
MK
1623 IN UINT64 Value,\r
1624 OUT CHAR8 *BinString\r
e63da9f0
JW
1625 )\r
1626{\r
1436aea4 1627 UINTN Index;\r
e63da9f0
JW
1628\r
1629 if (BinString == NULL) {\r
1630 return;\r
1631 }\r
1632\r
1633 for (Index = 64; Index > 0; --Index) {\r
1634 BinString[Index - 1] = '0' + (Value & 1);\r
1436aea4 1635 Value = RShiftU64 (Value, 1);\r
e63da9f0 1636 }\r
1436aea4 1637\r
e63da9f0
JW
1638 BinString[64] = '\0';\r
1639}\r
1640\r
/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN   Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN   Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64  TableEntry;
  UINT64  Address;
  INTN    Level;
  UINTN   RepeatZero;
  CHAR8   String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8   *Ruler1;
  CHAR8   *Ruler2;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {
    return;
  }

  //
  // Nothing to dump if the guarded-memory bitmap was never populated.
  //
  if ((mGuardedMemoryMap == 0) ||
      (mMapLevel == 0) ||
      (mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH))
  {
    return;
  }

  //
  // Column rulers printed above the 64-bit rows.
  //
  Ruler1 = " 3 2 1 0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((
    HEAP_GUARD_DEBUG_LEVEL,
    "============================="
    " Guarded Memory Bitmap "
    "==============================\r\n"
    ));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof (Indices), 0);
  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);

  //
  // Depth-first walk over the bitmap, starting at the shallowest level in
  // use. RepeatZero collapses consecutive all-zero leaf rows into "...".
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back to the parent and print a
      // separator between subtrees.
      //
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));
    } else {
      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          //
          // Print the first zero row in full, an ellipsis for the second,
          // and suppress the rest of the run.
          //
          if (RepeatZero == 0) {
            Uint64ToBinString (TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));
          }

          RepeatZero += 1;
        }
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Intermediate level: descend into the child table.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;
        RepeatZero        = 0;

        continue;
      } else {
        //
        // Non-zero leaf row: always printed in full.
        //
        RepeatZero = 0;
        Uint64ToBinString (TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
      }
    }

    //
    // Walk finished once we pop above the shallowest level in use.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at this level and refresh its base address.
    //
    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}