]> git.proxmox.com Git - mirror_edk2.git/blame_incremental - MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
MdeModulePkg/Core: allow HeapGuard even before CpuArchProtocol installed
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
... / ...
CommitLineData
1/** @file\r
2 UEFI Heap Guard functions.\r
3\r
4Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "DxeMain.h"\r
16#include "Imem.h"\r
17#include "HeapGuard.h"\r
18\r
//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocate pages for new PDE/PTE.
// While TRUE, allocations are performed without guarding.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to table tracking the Guarded memory with bitmap, in which '1'
// is used to indicate memory guarded. '0' might be free memory or Guard
// page itself, depending on status of memory adjacent to it.
// Holds either a bitmap unit directly (depth 1) or the root of a multi-level
// table; see FindGuardedMemoryMap().
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized at least by 1. It will be automatically
// updated (deepened) according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table, indexed from the root level
// down; used to derive the table index for a given physical address.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
46\r
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // Split the run into three parts: Msbs bits completing the first partial
  // 64-bit entry, Qwords whole entries, and Lsbs bits spilling into the
  // last partial entry. The modulo makes Msbs 0 when StartBit is 0, so a
  // fully covered first entry is handled by the Qwords path instead.
  //
  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // The whole run fits inside the first entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    //
    // Whole 64-bit entries are filled in one shot.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
99\r
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // Same three-way split as SetBits(): Msbs bits in the first partial
  // 64-bit entry, Qwords whole entries, Lsbs bits in the last partial
  // entry. Msbs is 0 when StartBit is 0 (first entry fully covered).
  //
  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // The whole run fits inside the first entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    //
    // Whole 64-bit entries are zeroed in one shot.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
151\r
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address    Start address to retrieve bits for.
  @param[in]  BitNumber  Number of bits to get.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // The requested run spans at most two 64-bit entries: Msbs bits from the
  // first entry and, if it wraps, Lsbs bits from the next one.
  //
  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  //
  // Low part of the result comes from the first entry; the spill-over bits
  // from the second entry are appended above them.
  //
  Result    = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
  if (Lsbs > 0) {
    BitMap  += 1;
    Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
  }

  return Result;
}
199\r
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  May deepen the map table (when AllocMapUnit is TRUE) and allocate missing
  intermediate tables / bitmap units on the way down.

  @param[in]  Address       Start address to search the bitmap for.
  @param[in]  AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out] BitMap        Pointer to bitmap which covers the Address. NULL
                            if the covering unit does not exist and
                            AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;
  EFI_STATUS              Status;

  //
  // Adjust current map table depth according to the address to access:
  // while the address has bits above what the current depth can index,
  // add a new root level on top of the existing map.
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      //
      // Allocate a new root table and hang the old map off its entry 0.
      // Last argument FALSE: do not guard the map memory itself
      // (presumably the NeedGuard flag — avoids guard recursion; confirm
      // against CoreInternalAllocatePages()).
      //
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                  AllocateAnyPages,
                  EfiBootServicesData,
                  EFI_SIZE_TO_PAGES (Size),
                  &MapMemory,
                  FALSE
                  );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk down the table levels, allocating missing tables/units on demand.
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        //
        // Lookup-only mode: report "not tracked" via a NULL bitmap.
        //
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                  AllocateAnyPages,
                  EfiBootServicesData,
                  EFI_SIZE_TO_PAGES (Size),
                  &MapMemory,
                  FALSE
                  );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    //
    // Table entries hold physical addresses as UINT64; index by the bits
    // of Address belonging to this level.
    //
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index     &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}
295\r
296/**\r
297 Set corresponding bits in bitmap table to 1 according to given memory range.\r
298\r
299 @param[in] Address Memory address to guard from.\r
300 @param[in] NumberOfPages Number of pages to guard.\r
301\r
302 @return VOID.\r
303**/\r
304VOID\r
305EFIAPI\r
306SetGuardedMemoryBits (\r
307 IN EFI_PHYSICAL_ADDRESS Address,\r
308 IN UINTN NumberOfPages\r
309 )\r
310{\r
311 UINT64 *BitMap;\r
312 UINTN Bits;\r
313 UINTN BitsToUnitEnd;\r
314\r
315 while (NumberOfPages > 0) {\r
316 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
317 ASSERT (BitMap != NULL);\r
318\r
319 if (NumberOfPages > BitsToUnitEnd) {\r
320 // Cross map unit\r
321 Bits = BitsToUnitEnd;\r
322 } else {\r
323 Bits = NumberOfPages;\r
324 }\r
325\r
326 SetBits (Address, Bits, BitMap);\r
327\r
328 NumberOfPages -= Bits;\r
329 Address += EFI_PAGES_TO_SIZE (Bits);\r
330 }\r
331}\r
332\r
333/**\r
334 Clear corresponding bits in bitmap table according to given memory range.\r
335\r
336 @param[in] Address Memory address to unset from.\r
337 @param[in] NumberOfPages Number of pages to unset guard.\r
338\r
339 @return VOID.\r
340**/\r
341VOID\r
342EFIAPI\r
343ClearGuardedMemoryBits (\r
344 IN EFI_PHYSICAL_ADDRESS Address,\r
345 IN UINTN NumberOfPages\r
346 )\r
347{\r
348 UINT64 *BitMap;\r
349 UINTN Bits;\r
350 UINTN BitsToUnitEnd;\r
351\r
352 while (NumberOfPages > 0) {\r
353 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
354 ASSERT (BitMap != NULL);\r
355\r
356 if (NumberOfPages > BitsToUnitEnd) {\r
357 // Cross map unit\r
358 Bits = BitsToUnitEnd;\r
359 } else {\r
360 Bits = NumberOfPages;\r
361 }\r
362\r
363 ClearBits (Address, Bits, BitMap);\r
364\r
365 NumberOfPages -= Bits;\r
366 Address += EFI_PAGES_TO_SIZE (Bits);\r
367 }\r
368}\r
369\r
370/**\r
371 Retrieve corresponding bits in bitmap table according to given memory range.\r
372\r
373 @param[in] Address Memory address to retrieve from.\r
374 @param[in] NumberOfPages Number of pages to retrieve.\r
375\r
376 @return An integer containing the guarded memory bitmap.\r
377**/\r
378UINTN\r
379GetGuardedMemoryBits (\r
380 IN EFI_PHYSICAL_ADDRESS Address,\r
381 IN UINTN NumberOfPages\r
382 )\r
383{\r
384 UINT64 *BitMap;\r
385 UINTN Bits;\r
386 UINTN Result;\r
387 UINTN Shift;\r
388 UINTN BitsToUnitEnd;\r
389\r
390 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);\r
391\r
392 Result = 0;\r
393 Shift = 0;\r
394 while (NumberOfPages > 0) {\r
395 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);\r
396\r
397 if (NumberOfPages > BitsToUnitEnd) {\r
398 // Cross map unit\r
399 Bits = BitsToUnitEnd;\r
400 } else {\r
401 Bits = NumberOfPages;\r
402 }\r
403\r
404 if (BitMap != NULL) {\r
405 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);\r
406 }\r
407\r
408 Shift += Bits;\r
409 NumberOfPages -= Bits;\r
410 Address += EFI_PAGES_TO_SIZE (Bits);\r
411 }\r
412\r
413 return Result;\r
414}\r
415\r
416/**\r
417 Get bit value in bitmap table for the given address.\r
418\r
419 @param[in] Address The address to retrieve for.\r
420\r
421 @return 1 or 0.\r
422**/\r
423UINTN\r
424EFIAPI\r
425GetGuardMapBit (\r
426 IN EFI_PHYSICAL_ADDRESS Address\r
427 )\r
428{\r
429 UINT64 *GuardMap;\r
430\r
431 FindGuardedMemoryMap (Address, FALSE, &GuardMap);\r
432 if (GuardMap != NULL) {\r
433 if (RShiftU64 (*GuardMap,\r
434 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {\r
435 return 1;\r
436 }\r
437 }\r
438\r
439 return 0;\r
440}\r
441\r
442/**\r
443 Set the bit in bitmap table for the given address.\r
444\r
445 @param[in] Address The address to set for.\r
446\r
447 @return VOID.\r
448**/\r
449VOID\r
450EFIAPI\r
451SetGuardMapBit (\r
452 IN EFI_PHYSICAL_ADDRESS Address\r
453 )\r
454{\r
455 UINT64 *GuardMap;\r
456 UINT64 BitMask;\r
457\r
458 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
459 if (GuardMap != NULL) {\r
460 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
461 *GuardMap |= BitMask;\r
462 }\r
463}\r
464\r
465/**\r
466 Clear the bit in bitmap table for the given address.\r
467\r
468 @param[in] Address The address to clear for.\r
469\r
470 @return VOID.\r
471**/\r
472VOID\r
473EFIAPI\r
474ClearGuardMapBit (\r
475 IN EFI_PHYSICAL_ADDRESS Address\r
476 )\r
477{\r
478 UINT64 *GuardMap;\r
479 UINT64 BitMask;\r
480\r
481 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
482 if (GuardMap != NULL) {\r
483 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
484 *GuardMap &= ~BitMask;\r
485 }\r
486}\r
487\r
488/**\r
489 Check to see if the page at the given address is a Guard page or not.\r
490\r
491 @param[in] Address The address to check for.\r
492\r
493 @return TRUE The page at Address is a Guard page.\r
494 @return FALSE The page at Address is not a Guard page.\r
495**/\r
496BOOLEAN\r
497EFIAPI\r
498IsGuardPage (\r
499 IN EFI_PHYSICAL_ADDRESS Address\r
500 )\r
501{\r
502 UINTN BitMap;\r
503\r
504 //\r
505 // There must be at least one guarded page before and/or after given\r
506 // address if it's a Guard page. The bitmap pattern should be one of\r
507 // 001, 100 and 101\r
508 //\r
509 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);\r
510 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
511}\r
512\r
513/**\r
514 Check to see if the page at the given address is a head Guard page or not.\r
515\r
516 @param[in] Address The address to check for\r
517\r
518 @return TRUE The page at Address is a head Guard page\r
519 @return FALSE The page at Address is not a head Guard page\r
520**/\r
521BOOLEAN\r
522EFIAPI\r
523IsHeadGuard (\r
524 IN EFI_PHYSICAL_ADDRESS Address\r
525 )\r
526{\r
527 return (GetGuardedMemoryBits (Address, 2) == BIT1);\r
528}\r
529\r
530/**\r
531 Check to see if the page at the given address is a tail Guard page or not.\r
532\r
533 @param[in] Address The address to check for.\r
534\r
535 @return TRUE The page at Address is a tail Guard page.\r
536 @return FALSE The page at Address is not a tail Guard page.\r
537**/\r
538BOOLEAN\r
539EFIAPI\r
540IsTailGuard (\r
541 IN EFI_PHYSICAL_ADDRESS Address\r
542 )\r
543{\r
544 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);\r
545}\r
546\r
547/**\r
548 Check to see if the page at the given address is guarded or not.\r
549\r
550 @param[in] Address The address to check for.\r
551\r
552 @return TRUE The page at Address is guarded.\r
553 @return FALSE The page at Address is not guarded.\r
554**/\r
555BOOLEAN\r
556EFIAPI\r
557IsMemoryGuarded (\r
558 IN EFI_PHYSICAL_ADDRESS Address\r
559 )\r
560{\r
561 return (GetGuardMapBit (Address) == 1);\r
562}\r
563\r
/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT.
  Does nothing if the CPU Arch Protocol (gCpu) is not available yet.

  @param[in]  BaseAddress     Page address to Guard at

  @return VOID
**/
VOID
EFIAPI
SetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  if (gCpu == NULL) {
    return;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as NX memory protection.
  //
  gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
  mOnGuarding = FALSE;
}
595\r
/**
  Unset the Guard page at the given address to the normal memory.

  This is done by changing the page table attribute to be PRESENT.
  Does nothing if the CPU Arch Protocol (gCpu) is not available yet.

  @param[in]  BaseAddress     Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  UINT64          Attributes;

  if (gCpu == NULL) {
    return;
  }

  //
  // Once the Guard page is unset, it will be freed back to memory pool. NX
  // memory protection must be restored for this page if NX is enabled for free
  // memory.
  //
  Attributes = 0;
  if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
    Attributes |= EFI_MEMORY_XP;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as memory protection (NX). Please make sure they are not enabled
  // at the same time.
  //
  gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
  mOnGuarding = FALSE;
}
640\r
641/**\r
642 Check to see if the memory at the given address should be guarded or not.\r
643\r
644 @param[in] MemoryType Memory type to check.\r
645 @param[in] AllocateType Allocation type to check.\r
646 @param[in] PageOrPool Indicate a page allocation or pool allocation.\r
647\r
648\r
649 @return TRUE The given type of memory should be guarded.\r
650 @return FALSE The given type of memory should not be guarded.\r
651**/\r
652BOOLEAN\r
653IsMemoryTypeToGuard (\r
654 IN EFI_MEMORY_TYPE MemoryType,\r
655 IN EFI_ALLOCATE_TYPE AllocateType,\r
656 IN UINT8 PageOrPool\r
657 )\r
658{\r
659 UINT64 TestBit;\r
660 UINT64 ConfigBit;\r
661 BOOLEAN InSmm;\r
662\r
663 if (AllocateType == AllocateAddress) {\r
664 return FALSE;\r
665 }\r
666\r
667 InSmm = FALSE;\r
668 if (gSmmBase2 != NULL) {\r
669 gSmmBase2->InSmm (gSmmBase2, &InSmm);\r
670 }\r
671\r
672 if (InSmm) {\r
673 return FALSE;\r
674 }\r
675\r
676 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {\r
677 return FALSE;\r
678 }\r
679\r
680 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {\r
681 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);\r
682 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {\r
683 ConfigBit = PcdGet64 (PcdHeapGuardPageType);\r
684 } else {\r
685 ConfigBit = (UINT64)-1;\r
686 }\r
687\r
688 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {\r
689 TestBit = BIT63;\r
690 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {\r
691 TestBit = BIT62;\r
692 } else if (MemoryType < EfiMaxMemoryType) {\r
693 TestBit = LShiftU64 (1, MemoryType);\r
694 } else if (MemoryType == EfiMaxMemoryType) {\r
695 TestBit = (UINT64)-1;\r
696 } else {\r
697 TestBit = 0;\r
698 }\r
699\r
700 return ((ConfigBit & TestBit) != 0);\r
701}\r
702\r
703/**\r
704 Check to see if the pool at the given address should be guarded or not.\r
705\r
706 @param[in] MemoryType Pool type to check.\r
707\r
708\r
709 @return TRUE The given type of pool should be guarded.\r
710 @return FALSE The given type of pool should not be guarded.\r
711**/\r
712BOOLEAN\r
713IsPoolTypeToGuard (\r
714 IN EFI_MEMORY_TYPE MemoryType\r
715 )\r
716{\r
717 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,\r
718 GUARD_HEAP_TYPE_POOL);\r
719}\r
720\r
721/**\r
722 Check to see if the page at the given address should be guarded or not.\r
723\r
724 @param[in] MemoryType Page type to check.\r
725 @param[in] AllocateType Allocation type to check.\r
726\r
727 @return TRUE The given type of page should be guarded.\r
728 @return FALSE The given type of page should not be guarded.\r
729**/\r
730BOOLEAN\r
731IsPageTypeToGuard (\r
732 IN EFI_MEMORY_TYPE MemoryType,\r
733 IN EFI_ALLOCATE_TYPE AllocateType\r
734 )\r
735{\r
736 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);\r
737}\r
738\r
739/**\r
740 Check to see if the heap guard is enabled for page and/or pool allocation.\r
741\r
742 @return TRUE/FALSE.\r
743**/\r
744BOOLEAN\r
745IsHeapGuardEnabled (\r
746 VOID\r
747 )\r
748{\r
749 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,\r
750 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);\r
751}\r
752\r
753/**\r
754 Set head Guard and tail Guard for the given memory range.\r
755\r
756 @param[in] Memory Base address of memory to set guard for.\r
757 @param[in] NumberOfPages Memory size in pages.\r
758\r
759 @return VOID\r
760**/\r
761VOID\r
762SetGuardForMemory (\r
763 IN EFI_PHYSICAL_ADDRESS Memory,\r
764 IN UINTN NumberOfPages\r
765 )\r
766{\r
767 EFI_PHYSICAL_ADDRESS GuardPage;\r
768\r
769 //\r
770 // Set tail Guard\r
771 //\r
772 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
773 if (!IsGuardPage (GuardPage)) {\r
774 SetGuardPage (GuardPage);\r
775 }\r
776\r
777 // Set head Guard\r
778 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
779 if (!IsGuardPage (GuardPage)) {\r
780 SetGuardPage (GuardPage);\r
781 }\r
782\r
783 //\r
784 // Mark the memory range as Guarded\r
785 //\r
786 SetGuardedMemoryBits (Memory, NumberOfPages);\r
787}\r
788\r
/**
  Unset head Guard and tail Guard for the given memory range.

  Shared Guard pages (also guarding an adjacent block) are kept; in partial
  free situations a boundary page of the freed range is converted into a new
  Guard instead.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //                -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //                -------------------
  //      Start -> -1    -2
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
881\r
882/**\r
883 Adjust address of free memory according to existing and/or required Guard.\r
884\r
885 This function will check if there're existing Guard pages of adjacent\r
886 memory blocks, and try to use it as the Guard page of the memory to be\r
887 allocated.\r
888\r
889 @param[in] Start Start address of free memory block.\r
890 @param[in] Size Size of free memory block.\r
891 @param[in] SizeRequested Size of memory to allocate.\r
892\r
893 @return The end address of memory block found.\r
894 @return 0 if no enough space for the required size of memory and its Guard.\r
895**/\r
896UINT64\r
897AdjustMemoryS (\r
898 IN UINT64 Start,\r
899 IN UINT64 Size,\r
900 IN UINT64 SizeRequested\r
901 )\r
902{\r
903 UINT64 Target;\r
904\r
905 //\r
906 // UEFI spec requires that allocated pool must be 8-byte aligned. If it's\r
907 // indicated to put the pool near the Tail Guard, we need extra bytes to\r
908 // make sure alignment of the returned pool address.\r
909 //\r
910 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {\r
911 SizeRequested = ALIGN_VALUE(SizeRequested, 8);\r
912 }\r
913\r
914 Target = Start + Size - SizeRequested;\r
915 ASSERT (Target >= Start);\r
916 if (Target == 0) {\r
917 return 0;\r
918 }\r
919\r
920 if (!IsGuardPage (Start + Size)) {\r
921 // No Guard at tail to share. One more page is needed.\r
922 Target -= EFI_PAGES_TO_SIZE (1);\r
923 }\r
924\r
925 // Out of range?\r
926 if (Target < Start) {\r
927 return 0;\r
928 }\r
929\r
930 // At the edge?\r
931 if (Target == Start) {\r
932 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {\r
933 // No enough space for a new head Guard if no Guard at head to share.\r
934 return 0;\r
935 }\r
936 }\r
937\r
938 // OK, we have enough pages for memory and its Guards. Return the End of the\r
939 // free space.\r
940 return Target + SizeRequested - 1;\r
941}\r
942\r
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //                -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //                -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
1044\r
1045/**\r
1046 Adjust the base and number of pages to really allocate according to Guard.\r
1047\r
1048 @param[in,out] Memory Base address of free memory.\r
1049 @param[in,out] NumberOfPages Size of memory to allocate.\r
1050\r
1051 @return VOID.\r
1052**/\r
1053VOID\r
1054AdjustMemoryA (\r
1055 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
1056 IN OUT UINTN *NumberOfPages\r
1057 )\r
1058{\r
1059 //\r
1060 // FindFreePages() has already taken the Guard into account. It's safe to\r
1061 // adjust the start address and/or number of pages here, to make sure that\r
1062 // the Guards are also "allocated".\r
1063 //\r
1064 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {\r
1065 // No tail Guard, add one.\r
1066 *NumberOfPages += 1;\r
1067 }\r
1068\r
1069 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {\r
1070 // No head Guard, add one.\r
1071 *Memory -= EFI_PAGE_SIZE;\r
1072 *NumberOfPages += 1;\r
1073 }\r
1074}\r
1075\r
/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  pool tail or pool head.

  @param[in]  Memory    Base address of memory allocated.
  @param[in]  NoPages   Number of pages actually allocated.
  @param[in]  Size      Size of memory requested.
                        (plus pool head/tail overhead)

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS    Memory,
  IN UINTN                   NoPages,
  IN UINTN                   Size
  )
{
  if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
    //
    // Pool head is put near the head Guard (BIT7 set, or nothing to adjust
    // because the allocation failed).
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard. Align to 8 bytes as the UEFI spec
  // requires for pool; AdjustMemoryS() reserved the extra alignment bytes.
  //
  Size = ALIGN_VALUE (Size, 8);
  return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
}
1107\r
/**
  Get the page base address according to pool head address.

  @param[in]  Memory    Head address of pool to free.

  @return Page base address of the given pool head.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS    Memory
  )
{
  if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
    //
    // Pool head is put near the head Guard, so it is already page-aligned.
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard; round down to the page base.
  //
  return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
}
1132\r
/**
  Allocate or free guarded memory.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @retval EFI_SUCCESS         On free, the whole adjusted range turned out to
                              be Guard pages only (nothing left to convert).
  @return Status of CoreConvertPages() otherwise.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    //
    // Freeing: keep the original range for the bitmap cleanup below, since
    // AdjustMemoryF() may shrink/shift Start and NumberOfPages to account
    // for shared Guard pages.
    //
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      return EFI_SUCCESS;
    }
  } else {
    //
    // Allocating: grow the range to cover its Guard pages as well.
    //
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1174\r
1175/**\r
1176 Set all Guard pages which cannot be set before CPU Arch Protocol installed.\r
1177**/\r
1178VOID\r
1179SetAllGuardPages (\r
1180 VOID\r
1181 )\r
1182{\r
1183 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1184 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1185 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1186 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1187 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1188 UINT64 TableEntry;\r
1189 UINT64 Address;\r
1190 UINT64 GuardPage;\r
1191 INTN Level;\r
1192 UINTN Index;\r
1193 BOOLEAN OnGuarding;\r
1194\r
1195 if (mGuardedMemoryMap == 0 ||\r
1196 mMapLevel == 0 ||\r
1197 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {\r
1198 return;\r
1199 }\r
1200\r
1201 CopyMem (Entries, mLevelMask, sizeof (Entries));\r
1202 CopyMem (Shifts, mLevelShift, sizeof (Shifts));\r
1203\r
1204 SetMem (Tables, sizeof(Tables), 0);\r
1205 SetMem (Addresses, sizeof(Addresses), 0);\r
1206 SetMem (Indices, sizeof(Indices), 0);\r
1207\r
1208 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
1209 Tables[Level] = mGuardedMemoryMap;\r
1210 Address = 0;\r
1211 OnGuarding = FALSE;\r
1212\r
1213 DEBUG_CODE (\r
1214 DumpGuardedMemoryBitmap ();\r
1215 );\r
1216\r
1217 while (TRUE) {\r
1218 if (Indices[Level] > Entries[Level]) {\r
1219 Tables[Level] = 0;\r
1220 Level -= 1;\r
1221 } else {\r
1222\r
1223 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];\r
1224 Address = Addresses[Level];\r
1225\r
1226 if (TableEntry == 0) {\r
1227\r
1228 OnGuarding = FALSE;\r
1229\r
1230 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1231\r
1232 Level += 1;\r
1233 Tables[Level] = TableEntry;\r
1234 Addresses[Level] = Address;\r
1235 Indices[Level] = 0;\r
1236\r
1237 continue;\r
1238\r
1239 } else {\r
1240\r
1241 Index = 0;\r
1242 while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {\r
1243 if ((TableEntry & 1) == 1) {\r
1244 if (OnGuarding) {\r
1245 GuardPage = 0;\r
1246 } else {\r
1247 GuardPage = Address - EFI_PAGE_SIZE;\r
1248 }\r
1249 OnGuarding = TRUE;\r
1250 } else {\r
1251 if (OnGuarding) {\r
1252 GuardPage = Address;\r
1253 } else {\r
1254 GuardPage = 0;\r
1255 }\r
1256 OnGuarding = FALSE;\r
1257 }\r
1258\r
1259 if (GuardPage != 0) {\r
1260 SetGuardPage (GuardPage);\r
1261 }\r
1262\r
1263 if (TableEntry == 0) {\r
1264 break;\r
1265 }\r
1266\r
1267 TableEntry = RShiftU64 (TableEntry, 1);\r
1268 Address += EFI_PAGE_SIZE;\r
1269 Index += 1;\r
1270 }\r
1271 }\r
1272 }\r
1273\r
1274 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {\r
1275 break;\r
1276 }\r
1277\r
1278 Indices[Level] += 1;\r
1279 Address = (Level == 0) ? 0 : Addresses[Level - 1];\r
1280 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);\r
1281\r
1282 }\r
1283}\r
1284\r
1285/**\r
1286 Notify function used to set all Guard pages before CPU Arch Protocol installed.\r
1287**/\r
1288VOID\r
1289HeapGuardCpuArchProtocolNotify (\r
1290 VOID\r
1291 )\r
1292{\r
1293 ASSERT (gCpu != NULL);\r
1294 SetAllGuardPages ();\r
1295}\r
1296\r
1297/**\r
1298 Helper function to convert a UINT64 value in binary to a string.\r
1299\r
1300 @param[in] Value Value of a UINT64 integer.\r
1301 @param[out] BinString String buffer to contain the conversion result.\r
1302\r
1303 @return VOID.\r
1304**/\r
1305VOID\r
1306Uint64ToBinString (\r
1307 IN UINT64 Value,\r
1308 OUT CHAR8 *BinString\r
1309 )\r
1310{\r
1311 UINTN Index;\r
1312\r
1313 if (BinString == NULL) {\r
1314 return;\r
1315 }\r
1316\r
1317 for (Index = 64; Index > 0; --Index) {\r
1318 BinString[Index - 1] = '0' + (Value & 1);\r
1319 Value = RShiftU64 (Value, 1);\r
1320 }\r
1321 BinString[64] = '\0';\r
1322}\r
1323\r
1324/**\r
1325 Dump the guarded memory bit map.\r
1326**/\r
1327VOID\r
1328EFIAPI\r
1329DumpGuardedMemoryBitmap (\r
1330 VOID\r
1331 )\r
1332{\r
1333 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1334 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1335 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1336 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1337 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1338 UINT64 TableEntry;\r
1339 UINT64 Address;\r
1340 INTN Level;\r
1341 UINTN RepeatZero;\r
1342 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];\r
1343 CHAR8 *Ruler1;\r
1344 CHAR8 *Ruler2;\r
1345\r
1346 if (mGuardedMemoryMap == 0 ||\r
1347 mMapLevel == 0 ||\r
1348 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {\r
1349 return;\r
1350 }\r
1351\r
1352 Ruler1 = " 3 2 1 0";\r
1353 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";\r
1354\r
1355 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="\r
1356 " Guarded Memory Bitmap "\r
1357 "==============================\r\n"));\r
1358 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));\r
1359 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));\r
1360\r
1361 CopyMem (Entries, mLevelMask, sizeof (Entries));\r
1362 CopyMem (Shifts, mLevelShift, sizeof (Shifts));\r
1363\r
1364 SetMem (Indices, sizeof(Indices), 0);\r
1365 SetMem (Tables, sizeof(Tables), 0);\r
1366 SetMem (Addresses, sizeof(Addresses), 0);\r
1367\r
1368 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
1369 Tables[Level] = mGuardedMemoryMap;\r
1370 Address = 0;\r
1371 RepeatZero = 0;\r
1372\r
1373 while (TRUE) {\r
1374 if (Indices[Level] > Entries[Level]) {\r
1375\r
1376 Tables[Level] = 0;\r
1377 Level -= 1;\r
1378 RepeatZero = 0;\r
1379\r
1380 DEBUG ((\r
1381 HEAP_GUARD_DEBUG_LEVEL,\r
1382 "========================================="\r
1383 "=========================================\r\n"\r
1384 ));\r
1385\r
1386 } else {\r
1387\r
1388 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];\r
1389 Address = Addresses[Level];\r
1390\r
1391 if (TableEntry == 0) {\r
1392\r
1393 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1394 if (RepeatZero == 0) {\r
1395 Uint64ToBinString(TableEntry, String);\r
1396 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));\r
1397 } else if (RepeatZero == 1) {\r
1398 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));\r
1399 }\r
1400 RepeatZero += 1;\r
1401 }\r
1402\r
1403 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1404\r
1405 Level += 1;\r
1406 Tables[Level] = TableEntry;\r
1407 Addresses[Level] = Address;\r
1408 Indices[Level] = 0;\r
1409 RepeatZero = 0;\r
1410\r
1411 continue;\r
1412\r
1413 } else {\r
1414\r
1415 RepeatZero = 0;\r
1416 Uint64ToBinString(TableEntry, String);\r
1417 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));\r
1418\r
1419 }\r
1420 }\r
1421\r
1422 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {\r
1423 break;\r
1424 }\r
1425\r
1426 Indices[Level] += 1;\r
1427 Address = (Level == 0) ? 0 : Addresses[Level - 1];\r
1428 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);\r
1429\r
1430 }\r
1431}\r
1432\r