]> git.proxmox.com Git - mirror_edk2.git/blame - MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
MdeModulePkg/DxeCore: add sanity check for SetMemoryAttributes
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
CommitLineData
e63da9f0
JW
1/** @file\r
2 UEFI Heap Guard functions.\r
3\r
8b13bca9 4Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>\r
e63da9f0
JW
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "DxeMain.h"\r
16#include "Imem.h"\r
17#include "HeapGuard.h"\r
18\r
//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocate pages for new PDE/PTE.
// When TRUE, allocations made for page-table pages are not guarded.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to table tracking the Guarded memory with bitmap, in which '1'
// is used to indicate memory guarded. '0' might be free memory or Guard
// page itself, depending on status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized at least by 1. It will be automatically
// updated according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table. Indexed from the top level
// (largest shift) down to the bitmap level.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
46\r
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address     Start address to set for.
  @param[in]  BitNumber   Number of bits to set.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Range reaches or crosses the end of the first 64-bit entry. Split it
    // into: Msbs bits in the first entry, Qwords whole entries, and Lsbs
    // bits in the last entry. The '%' keeps Msbs zero when StartBit is 0,
    // so a full first entry is counted in Qwords instead of producing an
    // undefined 64-bit shift in LShiftU64 below.
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Entire range fits inside the first entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    // Set the partial run at the head of the first entry.
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    // Set whole 64-bit entries in one shot.
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    // Set the trailing partial run in the last entry.
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
99\r
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address     Start address to set for.
  @param[in]  BitNumber   Number of bits to set.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Range reaches or crosses the end of the first 64-bit entry. Split it
    // into: Msbs bits in the first entry, Qwords whole entries, and Lsbs
    // bits in the last entry. The '%' keeps Msbs zero when StartBit is 0,
    // so a full first entry is counted in Qwords instead of producing an
    // undefined 64-bit shift in LShiftU64 below.
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Entire range fits inside the first entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    // Clear the partial run at the head of the first entry.
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    // Clear whole 64-bit entries in one shot.
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    // Clear the trailing partial run in the last entry.
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
151\r
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address     Start address to retrieve bits for.
  @param[in]  BitNumber   Number of bits to get.
  @param[in]  BitMap      Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Range crosses into the next 64-bit entry: Msbs bits come from the
    // first entry, Lsbs bits from the second.
    //
    Msbs  = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs  = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs  = BitNumber;
    Lsbs  = 0;
  }

  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Whole-entry read. Handled specially because LShiftU64 by 64 bits
    // (Msbs == 64 in the mask computation below) would be undefined.
    //
    Result = *BitMap;
  } else {
    // Take Msbs bits from the first entry ...
    Result    = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
    if (Lsbs > 0) {
      // ... and splice Lsbs bits from the next entry above them.
      BitMap  += 1;
      Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
203\r
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  The bitmap is a multi-level trie rooted at mGuardedMemoryMap. This walks
  (and, if AllocMapUnit, grows) the trie down to the leaf bitmap entry.

  @param[in]  Address       Start address to search the bitmap for.
  @param[in]  AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out] BitMap        Pointer to bitmap which covers the Address.
                            Set to NULL if the map unit does not exist and
                            AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;
  EFI_STATUS              Status;

  //
  // Adjust current map table depth according to the address to access.
  // Each extra level widens the address range the trie can cover; a new
  // level is inserted above the current root until Address fits.
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      //
      // Allocate a new top-level table and make the old root its first
      // entry (index 0 covers the previously-tracked address range).
      //
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                  AllocateAnyPages,
                  EfiBootServicesData,
                  EFI_SIZE_TO_PAGES (Size),
                  &MapMemory,
                  FALSE
                  );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk from the root down to the leaf bitmap, allocating missing
  // intermediate tables on demand when AllocMapUnit is TRUE.
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        // Map unit does not exist and caller asked not to create it.
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                  AllocateAnyPages,
                  EfiBootServicesData,
                  EFI_SIZE_TO_PAGES (Size),
                  &MapMemory,
                  FALSE
                  );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    // Descend into the child slot selected by this level's address bits.
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index     &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}
299\r
300/**\r
301 Set corresponding bits in bitmap table to 1 according to given memory range.\r
302\r
303 @param[in] Address Memory address to guard from.\r
304 @param[in] NumberOfPages Number of pages to guard.\r
305\r
306 @return VOID.\r
307**/\r
308VOID\r
309EFIAPI\r
310SetGuardedMemoryBits (\r
311 IN EFI_PHYSICAL_ADDRESS Address,\r
312 IN UINTN NumberOfPages\r
313 )\r
314{\r
315 UINT64 *BitMap;\r
316 UINTN Bits;\r
317 UINTN BitsToUnitEnd;\r
318\r
319 while (NumberOfPages > 0) {\r
320 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
321 ASSERT (BitMap != NULL);\r
322\r
323 if (NumberOfPages > BitsToUnitEnd) {\r
324 // Cross map unit\r
325 Bits = BitsToUnitEnd;\r
326 } else {\r
327 Bits = NumberOfPages;\r
328 }\r
329\r
330 SetBits (Address, Bits, BitMap);\r
331\r
332 NumberOfPages -= Bits;\r
333 Address += EFI_PAGES_TO_SIZE (Bits);\r
334 }\r
335}\r
336\r
337/**\r
338 Clear corresponding bits in bitmap table according to given memory range.\r
339\r
340 @param[in] Address Memory address to unset from.\r
341 @param[in] NumberOfPages Number of pages to unset guard.\r
342\r
343 @return VOID.\r
344**/\r
345VOID\r
346EFIAPI\r
347ClearGuardedMemoryBits (\r
348 IN EFI_PHYSICAL_ADDRESS Address,\r
349 IN UINTN NumberOfPages\r
350 )\r
351{\r
352 UINT64 *BitMap;\r
353 UINTN Bits;\r
354 UINTN BitsToUnitEnd;\r
355\r
356 while (NumberOfPages > 0) {\r
357 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
358 ASSERT (BitMap != NULL);\r
359\r
360 if (NumberOfPages > BitsToUnitEnd) {\r
361 // Cross map unit\r
362 Bits = BitsToUnitEnd;\r
363 } else {\r
364 Bits = NumberOfPages;\r
365 }\r
366\r
367 ClearBits (Address, Bits, BitMap);\r
368\r
369 NumberOfPages -= Bits;\r
370 Address += EFI_PAGES_TO_SIZE (Bits);\r
371 }\r
372}\r
373\r
374/**\r
375 Retrieve corresponding bits in bitmap table according to given memory range.\r
376\r
377 @param[in] Address Memory address to retrieve from.\r
378 @param[in] NumberOfPages Number of pages to retrieve.\r
379\r
380 @return An integer containing the guarded memory bitmap.\r
381**/\r
382UINTN\r
383GetGuardedMemoryBits (\r
384 IN EFI_PHYSICAL_ADDRESS Address,\r
385 IN UINTN NumberOfPages\r
386 )\r
387{\r
388 UINT64 *BitMap;\r
389 UINTN Bits;\r
390 UINTN Result;\r
391 UINTN Shift;\r
392 UINTN BitsToUnitEnd;\r
393\r
394 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);\r
395\r
396 Result = 0;\r
397 Shift = 0;\r
398 while (NumberOfPages > 0) {\r
399 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);\r
400\r
401 if (NumberOfPages > BitsToUnitEnd) {\r
402 // Cross map unit\r
403 Bits = BitsToUnitEnd;\r
404 } else {\r
405 Bits = NumberOfPages;\r
406 }\r
407\r
408 if (BitMap != NULL) {\r
409 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);\r
410 }\r
411\r
412 Shift += Bits;\r
413 NumberOfPages -= Bits;\r
414 Address += EFI_PAGES_TO_SIZE (Bits);\r
415 }\r
416\r
417 return Result;\r
418}\r
419\r
420/**\r
421 Get bit value in bitmap table for the given address.\r
422\r
423 @param[in] Address The address to retrieve for.\r
424\r
425 @return 1 or 0.\r
426**/\r
427UINTN\r
428EFIAPI\r
429GetGuardMapBit (\r
430 IN EFI_PHYSICAL_ADDRESS Address\r
431 )\r
432{\r
433 UINT64 *GuardMap;\r
434\r
435 FindGuardedMemoryMap (Address, FALSE, &GuardMap);\r
436 if (GuardMap != NULL) {\r
437 if (RShiftU64 (*GuardMap,\r
438 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {\r
439 return 1;\r
440 }\r
441 }\r
442\r
443 return 0;\r
444}\r
445\r
446/**\r
447 Set the bit in bitmap table for the given address.\r
448\r
449 @param[in] Address The address to set for.\r
450\r
451 @return VOID.\r
452**/\r
453VOID\r
454EFIAPI\r
455SetGuardMapBit (\r
456 IN EFI_PHYSICAL_ADDRESS Address\r
457 )\r
458{\r
459 UINT64 *GuardMap;\r
460 UINT64 BitMask;\r
461\r
462 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
463 if (GuardMap != NULL) {\r
464 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
465 *GuardMap |= BitMask;\r
466 }\r
467}\r
468\r
469/**\r
470 Clear the bit in bitmap table for the given address.\r
471\r
472 @param[in] Address The address to clear for.\r
473\r
474 @return VOID.\r
475**/\r
476VOID\r
477EFIAPI\r
478ClearGuardMapBit (\r
479 IN EFI_PHYSICAL_ADDRESS Address\r
480 )\r
481{\r
482 UINT64 *GuardMap;\r
483 UINT64 BitMask;\r
484\r
485 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
486 if (GuardMap != NULL) {\r
487 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
488 *GuardMap &= ~BitMask;\r
489 }\r
490}\r
491\r
492/**\r
493 Check to see if the page at the given address is a Guard page or not.\r
494\r
495 @param[in] Address The address to check for.\r
496\r
497 @return TRUE The page at Address is a Guard page.\r
498 @return FALSE The page at Address is not a Guard page.\r
499**/\r
500BOOLEAN\r
501EFIAPI\r
502IsGuardPage (\r
503 IN EFI_PHYSICAL_ADDRESS Address\r
504 )\r
505{\r
506 UINTN BitMap;\r
507\r
508 //\r
509 // There must be at least one guarded page before and/or after given\r
510 // address if it's a Guard page. The bitmap pattern should be one of\r
511 // 001, 100 and 101\r
512 //\r
513 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);\r
514 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
515}\r
516\r
517/**\r
518 Check to see if the page at the given address is a head Guard page or not.\r
519\r
520 @param[in] Address The address to check for\r
521\r
522 @return TRUE The page at Address is a head Guard page\r
523 @return FALSE The page at Address is not a head Guard page\r
524**/\r
525BOOLEAN\r
526EFIAPI\r
527IsHeadGuard (\r
528 IN EFI_PHYSICAL_ADDRESS Address\r
529 )\r
530{\r
531 return (GetGuardedMemoryBits (Address, 2) == BIT1);\r
532}\r
533\r
534/**\r
535 Check to see if the page at the given address is a tail Guard page or not.\r
536\r
537 @param[in] Address The address to check for.\r
538\r
539 @return TRUE The page at Address is a tail Guard page.\r
540 @return FALSE The page at Address is not a tail Guard page.\r
541**/\r
542BOOLEAN\r
543EFIAPI\r
544IsTailGuard (\r
545 IN EFI_PHYSICAL_ADDRESS Address\r
546 )\r
547{\r
548 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);\r
549}\r
550\r
551/**\r
552 Check to see if the page at the given address is guarded or not.\r
553\r
554 @param[in] Address The address to check for.\r
555\r
556 @return TRUE The page at Address is guarded.\r
557 @return FALSE The page at Address is not guarded.\r
558**/\r
559BOOLEAN\r
560EFIAPI\r
561IsMemoryGuarded (\r
562 IN EFI_PHYSICAL_ADDRESS Address\r
563 )\r
564{\r
565 return (GetGuardMapBit (Address) == 1);\r
566}\r
567\r
568/**\r
569 Set the page at the given address to be a Guard page.\r
570\r
571 This is done by changing the page table attribute to be NOT PRSENT.\r
572\r
573 @param[in] BaseAddress Page address to Guard at\r
574\r
575 @return VOID\r
576**/\r
577VOID\r
578EFIAPI\r
579SetGuardPage (\r
580 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
581 )\r
582{\r
a5cd613c
JW
583 EFI_STATUS Status;\r
584\r
7fef06af
JW
585 if (gCpu == NULL) {\r
586 return;\r
587 }\r
588\r
e63da9f0
JW
589 //\r
590 // Set flag to make sure allocating memory without GUARD for page table\r
591 // operation; otherwise infinite loops could be caused.\r
592 //\r
593 mOnGuarding = TRUE;\r
594 //\r
595 // Note: This might overwrite other attributes needed by other features,\r
c44218e5 596 // such as NX memory protection.\r
e63da9f0 597 //\r
a5cd613c
JW
598 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);\r
599 ASSERT_EFI_ERROR (Status);\r
e63da9f0
JW
600 mOnGuarding = FALSE;\r
601}\r
602\r
603/**\r
604 Unset the Guard page at the given address to the normal memory.\r
605\r
606 This is done by changing the page table attribute to be PRSENT.\r
607\r
608 @param[in] BaseAddress Page address to Guard at.\r
609\r
610 @return VOID.\r
611**/\r
612VOID\r
613EFIAPI\r
614UnsetGuardPage (\r
615 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
616 )\r
617{\r
c44218e5 618 UINT64 Attributes;\r
a5cd613c 619 EFI_STATUS Status;\r
c44218e5 620\r
7fef06af
JW
621 if (gCpu == NULL) {\r
622 return;\r
623 }\r
624\r
c44218e5
JW
625 //\r
626 // Once the Guard page is unset, it will be freed back to memory pool. NX\r
627 // memory protection must be restored for this page if NX is enabled for free\r
628 // memory.\r
629 //\r
630 Attributes = 0;\r
631 if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {\r
632 Attributes |= EFI_MEMORY_XP;\r
633 }\r
634\r
e63da9f0
JW
635 //\r
636 // Set flag to make sure allocating memory without GUARD for page table\r
637 // operation; otherwise infinite loops could be caused.\r
638 //\r
639 mOnGuarding = TRUE;\r
640 //\r
641 // Note: This might overwrite other attributes needed by other features,\r
642 // such as memory protection (NX). Please make sure they are not enabled\r
643 // at the same time.\r
644 //\r
a5cd613c
JW
645 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);\r
646 ASSERT_EFI_ERROR (Status);\r
e63da9f0
JW
647 mOnGuarding = FALSE;\r
648}\r
649\r
650/**\r
651 Check to see if the memory at the given address should be guarded or not.\r
652\r
653 @param[in] MemoryType Memory type to check.\r
654 @param[in] AllocateType Allocation type to check.\r
655 @param[in] PageOrPool Indicate a page allocation or pool allocation.\r
656\r
657\r
658 @return TRUE The given type of memory should be guarded.\r
659 @return FALSE The given type of memory should not be guarded.\r
660**/\r
661BOOLEAN\r
662IsMemoryTypeToGuard (\r
663 IN EFI_MEMORY_TYPE MemoryType,\r
664 IN EFI_ALLOCATE_TYPE AllocateType,\r
665 IN UINT8 PageOrPool\r
666 )\r
667{\r
668 UINT64 TestBit;\r
669 UINT64 ConfigBit;\r
670 BOOLEAN InSmm;\r
671\r
7fef06af 672 if (AllocateType == AllocateAddress) {\r
e63da9f0
JW
673 return FALSE;\r
674 }\r
675\r
676 InSmm = FALSE;\r
677 if (gSmmBase2 != NULL) {\r
678 gSmmBase2->InSmm (gSmmBase2, &InSmm);\r
679 }\r
680\r
681 if (InSmm) {\r
682 return FALSE;\r
683 }\r
684\r
685 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {\r
686 return FALSE;\r
687 }\r
688\r
689 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {\r
690 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);\r
691 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {\r
692 ConfigBit = PcdGet64 (PcdHeapGuardPageType);\r
693 } else {\r
694 ConfigBit = (UINT64)-1;\r
695 }\r
696\r
697 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {\r
698 TestBit = BIT63;\r
699 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {\r
700 TestBit = BIT62;\r
701 } else if (MemoryType < EfiMaxMemoryType) {\r
702 TestBit = LShiftU64 (1, MemoryType);\r
703 } else if (MemoryType == EfiMaxMemoryType) {\r
704 TestBit = (UINT64)-1;\r
705 } else {\r
706 TestBit = 0;\r
707 }\r
708\r
709 return ((ConfigBit & TestBit) != 0);\r
710}\r
711\r
712/**\r
713 Check to see if the pool at the given address should be guarded or not.\r
714\r
715 @param[in] MemoryType Pool type to check.\r
716\r
717\r
718 @return TRUE The given type of pool should be guarded.\r
719 @return FALSE The given type of pool should not be guarded.\r
720**/\r
721BOOLEAN\r
722IsPoolTypeToGuard (\r
723 IN EFI_MEMORY_TYPE MemoryType\r
724 )\r
725{\r
726 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,\r
727 GUARD_HEAP_TYPE_POOL);\r
728}\r
729\r
730/**\r
731 Check to see if the page at the given address should be guarded or not.\r
732\r
733 @param[in] MemoryType Page type to check.\r
734 @param[in] AllocateType Allocation type to check.\r
735\r
736 @return TRUE The given type of page should be guarded.\r
737 @return FALSE The given type of page should not be guarded.\r
738**/\r
739BOOLEAN\r
740IsPageTypeToGuard (\r
741 IN EFI_MEMORY_TYPE MemoryType,\r
742 IN EFI_ALLOCATE_TYPE AllocateType\r
743 )\r
744{\r
745 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);\r
746}\r
747\r
a6a0a597
JW
748/**\r
749 Check to see if the heap guard is enabled for page and/or pool allocation.\r
750\r
751 @return TRUE/FALSE.\r
752**/\r
753BOOLEAN\r
754IsHeapGuardEnabled (\r
755 VOID\r
756 )\r
757{\r
758 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,\r
759 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);\r
760}\r
761\r
e63da9f0
JW
762/**\r
763 Set head Guard and tail Guard for the given memory range.\r
764\r
765 @param[in] Memory Base address of memory to set guard for.\r
766 @param[in] NumberOfPages Memory size in pages.\r
767\r
768 @return VOID\r
769**/\r
770VOID\r
771SetGuardForMemory (\r
772 IN EFI_PHYSICAL_ADDRESS Memory,\r
773 IN UINTN NumberOfPages\r
774 )\r
775{\r
776 EFI_PHYSICAL_ADDRESS GuardPage;\r
777\r
778 //\r
779 // Set tail Guard\r
780 //\r
781 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
782 if (!IsGuardPage (GuardPage)) {\r
783 SetGuardPage (GuardPage);\r
784 }\r
785\r
786 // Set head Guard\r
787 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
788 if (!IsGuardPage (GuardPage)) {\r
789 SetGuardPage (GuardPage);\r
790 }\r
791\r
792 //\r
793 // Mark the memory range as Guarded\r
794 //\r
795 SetGuardedMemoryBits (Memory, NumberOfPages);\r
796}\r
797\r
/**
  Unset head Guard and tail Guard for the given memory range.

  Guard pages shared with adjacent allocations are kept; in a partial-free
  situation an edge page of the freed range is converted into a new Guard.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage   = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage   = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
890\r
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE(SizeRequested, 8);
  }

  // Candidate placement: push the allocation to the end of the free block.
  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    // Address 0 cannot be returned as a valid allocation.
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // No enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
951\r
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start       = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
1053\r
1054/**\r
1055 Adjust the base and number of pages to really allocate according to Guard.\r
1056\r
1057 @param[in,out] Memory Base address of free memory.\r
1058 @param[in,out] NumberOfPages Size of memory to allocate.\r
1059\r
1060 @return VOID.\r
1061**/\r
1062VOID\r
1063AdjustMemoryA (\r
1064 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
1065 IN OUT UINTN *NumberOfPages\r
1066 )\r
1067{\r
1068 //\r
1069 // FindFreePages() has already taken the Guard into account. It's safe to\r
1070 // adjust the start address and/or number of pages here, to make sure that\r
1071 // the Guards are also "allocated".\r
1072 //\r
1073 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {\r
1074 // No tail Guard, add one.\r
1075 *NumberOfPages += 1;\r
1076 }\r
1077\r
1078 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {\r
1079 // No head Guard, add one.\r
1080 *Memory -= EFI_PAGE_SIZE;\r
1081 *NumberOfPages += 1;\r
1082 }\r
1083}\r
1084\r
1085/**\r
1086 Adjust the pool head position to make sure the Guard page is adjavent to\r
1087 pool tail or pool head.\r
1088\r
1089 @param[in] Memory Base address of memory allocated.\r
1090 @param[in] NoPages Number of pages actually allocated.\r
1091 @param[in] Size Size of memory requested.\r
1092 (plus pool head/tail overhead)\r
1093\r
1094 @return Address of pool head.\r
1095**/\r
1096VOID *\r
1097AdjustPoolHeadA (\r
1098 IN EFI_PHYSICAL_ADDRESS Memory,\r
1099 IN UINTN NoPages,\r
1100 IN UINTN Size\r
1101 )\r
1102{\r
c44218e5 1103 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1104 //\r
1105 // Pool head is put near the head Guard\r
1106 //\r
1107 return (VOID *)(UINTN)Memory;\r
1108 }\r
1109\r
1110 //\r
1111 // Pool head is put near the tail Guard\r
1112 //\r
c44218e5 1113 Size = ALIGN_VALUE (Size, 8);\r
e63da9f0
JW
1114 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);\r
1115}\r
1116\r
1117/**\r
1118 Get the page base address according to pool head address.\r
1119\r
1120 @param[in] Memory Head address of pool to free.\r
1121\r
1122 @return Address of pool head.\r
1123**/\r
1124VOID *\r
1125AdjustPoolHeadF (\r
1126 IN EFI_PHYSICAL_ADDRESS Memory\r
1127 )\r
1128{\r
c44218e5 1129 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1130 //\r
1131 // Pool head is put near the head Guard\r
1132 //\r
1133 return (VOID *)(UINTN)Memory;\r
1134 }\r
1135\r
1136 //\r
1137 // Pool head is put near the tail Guard\r
1138 //\r
1139 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);\r
1140}\r
1141\r
/**
  Allocate or free guarded memory.

  The given range is widened (allocation) or narrowed (free) so that the
  Guard pages around it are converted together with the usable pages.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @return The status returned by CoreConvertPages(), or EFI_SUCCESS if the
          whole range turned out to be Guard pages and no conversion is
          needed.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64    OldStart;
  UINTN     OldPages;

  if (NewType == EfiConventionalMemory) {
    //
    // Freeing: remember the original range, then shrink it to exclude the
    // Guard pages (they are handled separately below).
    //
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    //
    // AdjustMemoryF() may consume the entire range (all pages were Guards);
    // nothing is left to convert in that case.
    //
    if (NumberOfPages == 0) {
      return EFI_SUCCESS;
    }
  } else {
    //
    // Allocating: widen the range so the Guard pages are "allocated" too.
    //
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1183\r
/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.

  Walks the whole multi-level guarded-memory bitmap without recursion. At the
  leaf level each bit represents one page ('1' = page is guarded). A 0->1 bit
  transition marks the page just before as a head Guard; a 1->0 transition
  marks the current page as a tail Guard. Each Guard page found is applied
  via SetGuardPage().
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     Index;
  BOOLEAN   OnGuarding;

  //
  // Nothing to do if the bitmap was never populated or its depth is invalid.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Per-level limits (mask doubles as max index) and address-bit shifts.
  //
  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // Start at the topmost populated level; a shallower map sits lower in the
  // fixed-depth cursor arrays.
  //
  Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address = 0;
  OnGuarding = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
    );

  //
  // Depth-first walk driven by the Indices[] cursor at each level.
  //
  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back up one level.
      //
      Tables[Level] = 0;
      Level -= 1;
    } else {

      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address = Addresses[Level];

      if (TableEntry == 0) {

        //
        // Whole entry unguarded; any open guarded run ends before it.
        //
        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Non-leaf entry: descend into the next-level table.
        //
        Level += 1;
        Tables[Level] = TableEntry;
        Addresses[Level] = Address;
        Indices[Level] = 0;

        continue;

      } else {

        //
        // Leaf entry: scan bit by bit, one page per bit.
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              //
              // Run of guarded pages starts: the page before is head Guard.
              //
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              //
              // Run of guarded pages ends: this page is the tail Guard.
              //
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          //
          // Remaining bits are all zero; skip to the next entry.
          //
          if (TableEntry == 0) {
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address += EFI_PAGE_SIZE;
          Index += 1;
        }
      }
    }

    //
    // Popped above the topmost populated level: traversal is complete.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance the cursor at this level and recompute the address it covers.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1293\r
/**
  Notify function used to set all Guard pages before CPU Arch Protocol installed.

  Called once the CPU Arch Protocol is available; applies the Guard page
  attributes that had to be deferred until page attributes could be changed.
**/
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  )
{
  // gCpu is required to change page attributes; it must be set by now.
  ASSERT (gCpu != NULL);
  SetAllGuardPages ();
}
1305\r
e63da9f0
JW
1306/**\r
1307 Helper function to convert a UINT64 value in binary to a string.\r
1308\r
1309 @param[in] Value Value of a UINT64 integer.\r
1310 @param[out] BinString String buffer to contain the conversion result.\r
1311\r
1312 @return VOID.\r
1313**/\r
1314VOID\r
1315Uint64ToBinString (\r
1316 IN UINT64 Value,\r
1317 OUT CHAR8 *BinString\r
1318 )\r
1319{\r
1320 UINTN Index;\r
1321\r
1322 if (BinString == NULL) {\r
1323 return;\r
1324 }\r
1325\r
1326 for (Index = 64; Index > 0; --Index) {\r
1327 BinString[Index - 1] = '0' + (Value & 1);\r
1328 Value = RShiftU64 (Value, 1);\r
1329 }\r
1330 BinString[64] = '\0';\r
1331}\r
1332\r
/**
  Dump the guarded memory bit map.

  Walks the multi-level bitmap the same way as SetAllGuardPages() and prints
  one line of 64 binary digits per leaf entry, prefixed by the page address
  it covers. Consecutive all-zero leaf entries are collapsed into a single
  "..." line to keep the dump readable.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  //
  // Nothing to dump if the bitmap was never populated or its depth is invalid.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Column rulers labeling bit positions 0x3F..0x00 of each printed entry.
  //
  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  //
  // Per-level limits (mask doubles as max index) and address-bit shifts.
  //
  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof(Indices), 0);
  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);

  //
  // Start at the topmost populated level of the fixed-depth cursor arrays.
  //
  Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address = 0;
  RepeatZero = 0;

  //
  // Depth-first walk driven by the Indices[] cursor at each level.
  //
  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {

      //
      // Current table exhausted; pop up one level and print a separator.
      //
      Tables[Level] = 0;
      Level -= 1;
      RepeatZero = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address = Addresses[Level];

      if (TableEntry == 0) {

        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          //
          // Print the first all-zero leaf, then "..." once for the rest of
          // the run; further zero entries in the run are suppressed.
          //
          if (RepeatZero == 0) {
            Uint64ToBinString(TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Non-leaf entry: descend into the next-level table.
        //
        Level += 1;
        Tables[Level] = TableEntry;
        Addresses[Level] = Address;
        Indices[Level] = 0;
        RepeatZero = 0;

        continue;

      } else {

        //
        // Non-zero leaf entry: print it and reset the zero-run counter.
        //
        RepeatZero = 0;
        Uint64ToBinString(TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    //
    // Popped above the topmost populated level: traversal is complete.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance the cursor at this level and recompute the address it covers.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1441\r