]> git.proxmox.com Git - mirror_edk2.git/blame - MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
MdeModulePkg/Mem: Initialize the variable MapMemory
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
CommitLineData
e63da9f0
JW
1/** @file\r
2 UEFI Heap Guard functions.\r
3\r
8b13bca9 4Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>\r
9d510e61 5SPDX-License-Identifier: BSD-2-Clause-Patent\r
e63da9f0
JW
6\r
7**/\r
8\r
9#include "DxeMain.h"\r
10#include "Imem.h"\r
11#include "HeapGuard.h"\r
12\r
13//\r
14// Global to avoid infinite reentrance of memory allocation when updating\r
15// page table attributes, which may need allocate pages for new PDE/PTE.\r
16//\r
17GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;\r
18\r
19//\r
20// Pointer to table tracking the Guarded memory with bitmap, in which '1'\r
21// is used to indicate memory guarded. '0' might be free memory or Guard\r
22// page itself, depending on status of memory adjacent to it.\r
23//\r
24GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;\r
25\r
26//\r
27// Current depth level of map table pointed by mGuardedMemoryMap.\r
28// mMapLevel must be initialized at least by 1. It will be automatically\r
29// updated according to the address of memory just tracked.\r
30//\r
31GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;\r
32\r
33//\r
34// Shift and mask for each level of map table\r
35//\r
36GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
37 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;\r
38GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
39 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;\r
40\r
63ebde8e
JW
41//\r
42// Used for promoting freed but not used pages.\r
43//\r
44GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage = BASE_4GB;\r
45\r
e63da9f0
JW
46/**\r
47 Set corresponding bits in bitmap table to 1 according to the address.\r
48\r
49 @param[in] Address Start address to set for.\r
50 @param[in] BitNumber Number of bits to set.\r
51 @param[in] BitMap Pointer to bitmap which covers the Address.\r
52\r
53 @return VOID.\r
54**/\r
55STATIC\r
56VOID\r
57SetBits (\r
58 IN EFI_PHYSICAL_ADDRESS Address,\r
59 IN UINTN BitNumber,\r
60 IN UINT64 *BitMap\r
61 )\r
62{\r
63 UINTN Lsbs;\r
64 UINTN Qwords;\r
65 UINTN Msbs;\r
66 UINTN StartBit;\r
67 UINTN EndBit;\r
68\r
69 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);\r
70 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
71\r
36f2f049 72 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {\r
e63da9f0
JW
73 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %\r
74 GUARDED_HEAP_MAP_ENTRY_BITS;\r
75 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
76 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;\r
77 } else {\r
78 Msbs = BitNumber;\r
79 Lsbs = 0;\r
80 Qwords = 0;\r
81 }\r
82\r
83 if (Msbs > 0) {\r
84 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);\r
85 BitMap += 1;\r
86 }\r
87\r
88 if (Qwords > 0) {\r
89 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,\r
90 (UINT64)-1);\r
91 BitMap += Qwords;\r
92 }\r
93\r
94 if (Lsbs > 0) {\r
95 *BitMap |= (LShiftU64 (1, Lsbs) - 1);\r
96 }\r
97}\r
98\r
99/**\r
100 Set corresponding bits in bitmap table to 0 according to the address.\r
101\r
102 @param[in] Address Start address to set for.\r
103 @param[in] BitNumber Number of bits to set.\r
104 @param[in] BitMap Pointer to bitmap which covers the Address.\r
105\r
106 @return VOID.\r
107**/\r
108STATIC\r
109VOID\r
110ClearBits (\r
111 IN EFI_PHYSICAL_ADDRESS Address,\r
112 IN UINTN BitNumber,\r
113 IN UINT64 *BitMap\r
114 )\r
115{\r
116 UINTN Lsbs;\r
117 UINTN Qwords;\r
118 UINTN Msbs;\r
119 UINTN StartBit;\r
120 UINTN EndBit;\r
121\r
122 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);\r
123 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
124\r
36f2f049 125 if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {\r
e63da9f0
JW
126 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %\r
127 GUARDED_HEAP_MAP_ENTRY_BITS;\r
128 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
129 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;\r
130 } else {\r
131 Msbs = BitNumber;\r
132 Lsbs = 0;\r
133 Qwords = 0;\r
134 }\r
135\r
136 if (Msbs > 0) {\r
137 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);\r
138 BitMap += 1;\r
139 }\r
140\r
141 if (Qwords > 0) {\r
142 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);\r
143 BitMap += Qwords;\r
144 }\r
145\r
146 if (Lsbs > 0) {\r
147 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);\r
148 }\r
149}\r
150\r
151/**\r
152 Get corresponding bits in bitmap table according to the address.\r
153\r
154 The value of bit 0 corresponds to the status of memory at given Address.\r
155 No more than 64 bits can be retrieved in one call.\r
156\r
157 @param[in] Address Start address to retrieve bits for.\r
158 @param[in] BitNumber Number of bits to get.\r
159 @param[in] BitMap Pointer to bitmap which covers the Address.\r
160\r
161 @return An integer containing the bits information.\r
162**/\r
163STATIC\r
164UINT64\r
165GetBits (\r
166 IN EFI_PHYSICAL_ADDRESS Address,\r
167 IN UINTN BitNumber,\r
168 IN UINT64 *BitMap\r
169 )\r
170{\r
171 UINTN StartBit;\r
172 UINTN EndBit;\r
173 UINTN Lsbs;\r
174 UINTN Msbs;\r
175 UINT64 Result;\r
176\r
177 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);\r
178\r
179 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);\r
180 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
181\r
182 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {\r
183 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;\r
184 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
185 } else {\r
186 Msbs = BitNumber;\r
187 Lsbs = 0;\r
188 }\r
189\r
36f2f049
JW
190 if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {\r
191 Result = *BitMap;\r
192 } else {\r
193 Result = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);\r
194 if (Lsbs > 0) {\r
195 BitMap += 1;\r
196 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);\r
197 }\r
e63da9f0
JW
198 }\r
199\r
200 return Result;\r
201}\r
202\r
203/**\r
204 Locate the pointer of bitmap from the guarded memory bitmap tables, which\r
205 covers the given Address.\r
206\r
207 @param[in] Address Start address to search the bitmap for.\r
208 @param[in] AllocMapUnit Flag to indicate memory allocation for the table.\r
209 @param[out] BitMap Pointer to bitmap which covers the Address.\r
210\r
211 @return The bit number from given Address to the end of current map table.\r
212**/\r
213UINTN\r
214FindGuardedMemoryMap (\r
215 IN EFI_PHYSICAL_ADDRESS Address,\r
216 IN BOOLEAN AllocMapUnit,\r
217 OUT UINT64 **BitMap\r
218 )\r
219{\r
220 UINTN Level;\r
221 UINT64 *GuardMap;\r
222 UINT64 MapMemory;\r
223 UINTN Index;\r
224 UINTN Size;\r
225 UINTN BitsToUnitEnd;\r
226 EFI_STATUS Status;\r
227\r
17efd446
SZ
228 MapMemory = 0;\r
229\r
e63da9f0
JW
230 //\r
231 // Adjust current map table depth according to the address to access\r
232 //\r
dd12683e
JW
233 while (AllocMapUnit &&\r
234 mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&\r
e63da9f0
JW
235 RShiftU64 (\r
236 Address,\r
237 mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]\r
238 ) != 0) {\r
239\r
240 if (mGuardedMemoryMap != 0) {\r
241 Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)\r
242 * GUARDED_HEAP_MAP_ENTRY_BYTES;\r
243 Status = CoreInternalAllocatePages (\r
244 AllocateAnyPages,\r
245 EfiBootServicesData,\r
246 EFI_SIZE_TO_PAGES (Size),\r
247 &MapMemory,\r
248 FALSE\r
249 );\r
250 ASSERT_EFI_ERROR (Status);\r
251 ASSERT (MapMemory != 0);\r
252\r
253 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);\r
254\r
255 *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;\r
256 mGuardedMemoryMap = MapMemory;\r
257 }\r
258\r
259 mMapLevel++;\r
260\r
261 }\r
262\r
263 GuardMap = &mGuardedMemoryMap;\r
264 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
265 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;\r
266 ++Level) {\r
267\r
268 if (*GuardMap == 0) {\r
269 if (!AllocMapUnit) {\r
270 GuardMap = NULL;\r
271 break;\r
272 }\r
273\r
274 Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;\r
275 Status = CoreInternalAllocatePages (\r
276 AllocateAnyPages,\r
277 EfiBootServicesData,\r
278 EFI_SIZE_TO_PAGES (Size),\r
279 &MapMemory,\r
280 FALSE\r
281 );\r
282 ASSERT_EFI_ERROR (Status);\r
283 ASSERT (MapMemory != 0);\r
284\r
285 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);\r
286 *GuardMap = MapMemory;\r
287 }\r
288\r
289 Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);\r
290 Index &= mLevelMask[Level];\r
291 GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));\r
292\r
293 }\r
294\r
295 BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);\r
296 *BitMap = GuardMap;\r
297\r
298 return BitsToUnitEnd;\r
299}\r
300\r
301/**\r
302 Set corresponding bits in bitmap table to 1 according to given memory range.\r
303\r
304 @param[in] Address Memory address to guard from.\r
305 @param[in] NumberOfPages Number of pages to guard.\r
306\r
307 @return VOID.\r
308**/\r
309VOID\r
310EFIAPI\r
311SetGuardedMemoryBits (\r
312 IN EFI_PHYSICAL_ADDRESS Address,\r
313 IN UINTN NumberOfPages\r
314 )\r
315{\r
316 UINT64 *BitMap;\r
317 UINTN Bits;\r
318 UINTN BitsToUnitEnd;\r
319\r
320 while (NumberOfPages > 0) {\r
321 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
322 ASSERT (BitMap != NULL);\r
323\r
324 if (NumberOfPages > BitsToUnitEnd) {\r
325 // Cross map unit\r
326 Bits = BitsToUnitEnd;\r
327 } else {\r
328 Bits = NumberOfPages;\r
329 }\r
330\r
331 SetBits (Address, Bits, BitMap);\r
332\r
333 NumberOfPages -= Bits;\r
334 Address += EFI_PAGES_TO_SIZE (Bits);\r
335 }\r
336}\r
337\r
338/**\r
339 Clear corresponding bits in bitmap table according to given memory range.\r
340\r
341 @param[in] Address Memory address to unset from.\r
342 @param[in] NumberOfPages Number of pages to unset guard.\r
343\r
344 @return VOID.\r
345**/\r
346VOID\r
347EFIAPI\r
348ClearGuardedMemoryBits (\r
349 IN EFI_PHYSICAL_ADDRESS Address,\r
350 IN UINTN NumberOfPages\r
351 )\r
352{\r
353 UINT64 *BitMap;\r
354 UINTN Bits;\r
355 UINTN BitsToUnitEnd;\r
356\r
357 while (NumberOfPages > 0) {\r
358 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
359 ASSERT (BitMap != NULL);\r
360\r
361 if (NumberOfPages > BitsToUnitEnd) {\r
362 // Cross map unit\r
363 Bits = BitsToUnitEnd;\r
364 } else {\r
365 Bits = NumberOfPages;\r
366 }\r
367\r
368 ClearBits (Address, Bits, BitMap);\r
369\r
370 NumberOfPages -= Bits;\r
371 Address += EFI_PAGES_TO_SIZE (Bits);\r
372 }\r
373}\r
374\r
375/**\r
376 Retrieve corresponding bits in bitmap table according to given memory range.\r
377\r
378 @param[in] Address Memory address to retrieve from.\r
379 @param[in] NumberOfPages Number of pages to retrieve.\r
380\r
381 @return An integer containing the guarded memory bitmap.\r
382**/\r
63ebde8e 383UINT64\r
e63da9f0
JW
384GetGuardedMemoryBits (\r
385 IN EFI_PHYSICAL_ADDRESS Address,\r
386 IN UINTN NumberOfPages\r
387 )\r
388{\r
389 UINT64 *BitMap;\r
390 UINTN Bits;\r
63ebde8e 391 UINT64 Result;\r
e63da9f0
JW
392 UINTN Shift;\r
393 UINTN BitsToUnitEnd;\r
394\r
395 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);\r
396\r
397 Result = 0;\r
398 Shift = 0;\r
399 while (NumberOfPages > 0) {\r
400 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);\r
401\r
402 if (NumberOfPages > BitsToUnitEnd) {\r
403 // Cross map unit\r
404 Bits = BitsToUnitEnd;\r
405 } else {\r
406 Bits = NumberOfPages;\r
407 }\r
408\r
409 if (BitMap != NULL) {\r
410 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);\r
411 }\r
412\r
413 Shift += Bits;\r
414 NumberOfPages -= Bits;\r
415 Address += EFI_PAGES_TO_SIZE (Bits);\r
416 }\r
417\r
418 return Result;\r
419}\r
420\r
421/**\r
422 Get bit value in bitmap table for the given address.\r
423\r
424 @param[in] Address The address to retrieve for.\r
425\r
426 @return 1 or 0.\r
427**/\r
428UINTN\r
429EFIAPI\r
430GetGuardMapBit (\r
431 IN EFI_PHYSICAL_ADDRESS Address\r
432 )\r
433{\r
434 UINT64 *GuardMap;\r
435\r
436 FindGuardedMemoryMap (Address, FALSE, &GuardMap);\r
437 if (GuardMap != NULL) {\r
438 if (RShiftU64 (*GuardMap,\r
439 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {\r
440 return 1;\r
441 }\r
442 }\r
443\r
444 return 0;\r
445}\r
446\r
e63da9f0
JW
447\r
448/**\r
449 Check to see if the page at the given address is a Guard page or not.\r
450\r
451 @param[in] Address The address to check for.\r
452\r
453 @return TRUE The page at Address is a Guard page.\r
454 @return FALSE The page at Address is not a Guard page.\r
455**/\r
456BOOLEAN\r
457EFIAPI\r
458IsGuardPage (\r
459 IN EFI_PHYSICAL_ADDRESS Address\r
460 )\r
461{\r
e5001ab7 462 UINT64 BitMap;\r
e63da9f0
JW
463\r
464 //\r
465 // There must be at least one guarded page before and/or after given\r
466 // address if it's a Guard page. The bitmap pattern should be one of\r
467 // 001, 100 and 101\r
468 //\r
469 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);\r
470 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
471}\r
472\r
e63da9f0
JW
473\r
474/**\r
475 Check to see if the page at the given address is guarded or not.\r
476\r
477 @param[in] Address The address to check for.\r
478\r
479 @return TRUE The page at Address is guarded.\r
480 @return FALSE The page at Address is not guarded.\r
481**/\r
482BOOLEAN\r
483EFIAPI\r
484IsMemoryGuarded (\r
485 IN EFI_PHYSICAL_ADDRESS Address\r
486 )\r
487{\r
488 return (GetGuardMapBit (Address) == 1);\r
489}\r
490\r
491/**\r
492 Set the page at the given address to be a Guard page.\r
493\r
494 This is done by changing the page table attribute to be NOT PRSENT.\r
495\r
496 @param[in] BaseAddress Page address to Guard at\r
497\r
498 @return VOID\r
499**/\r
500VOID\r
501EFIAPI\r
502SetGuardPage (\r
503 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
504 )\r
505{\r
a5cd613c
JW
506 EFI_STATUS Status;\r
507\r
7fef06af
JW
508 if (gCpu == NULL) {\r
509 return;\r
510 }\r
511\r
e63da9f0
JW
512 //\r
513 // Set flag to make sure allocating memory without GUARD for page table\r
514 // operation; otherwise infinite loops could be caused.\r
515 //\r
516 mOnGuarding = TRUE;\r
517 //\r
518 // Note: This might overwrite other attributes needed by other features,\r
c44218e5 519 // such as NX memory protection.\r
e63da9f0 520 //\r
a5cd613c
JW
521 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);\r
522 ASSERT_EFI_ERROR (Status);\r
e63da9f0
JW
523 mOnGuarding = FALSE;\r
524}\r
525\r
526/**\r
527 Unset the Guard page at the given address to the normal memory.\r
528\r
529 This is done by changing the page table attribute to be PRSENT.\r
530\r
531 @param[in] BaseAddress Page address to Guard at.\r
532\r
533 @return VOID.\r
534**/\r
535VOID\r
536EFIAPI\r
537UnsetGuardPage (\r
538 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
539 )\r
540{\r
c44218e5 541 UINT64 Attributes;\r
a5cd613c 542 EFI_STATUS Status;\r
c44218e5 543\r
7fef06af
JW
544 if (gCpu == NULL) {\r
545 return;\r
546 }\r
547\r
c44218e5
JW
548 //\r
549 // Once the Guard page is unset, it will be freed back to memory pool. NX\r
550 // memory protection must be restored for this page if NX is enabled for free\r
551 // memory.\r
552 //\r
553 Attributes = 0;\r
554 if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {\r
555 Attributes |= EFI_MEMORY_XP;\r
556 }\r
557\r
e63da9f0
JW
558 //\r
559 // Set flag to make sure allocating memory without GUARD for page table\r
560 // operation; otherwise infinite loops could be caused.\r
561 //\r
562 mOnGuarding = TRUE;\r
563 //\r
564 // Note: This might overwrite other attributes needed by other features,\r
565 // such as memory protection (NX). Please make sure they are not enabled\r
566 // at the same time.\r
567 //\r
a5cd613c
JW
568 Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);\r
569 ASSERT_EFI_ERROR (Status);\r
e63da9f0
JW
570 mOnGuarding = FALSE;\r
571}\r
572\r
573/**\r
574 Check to see if the memory at the given address should be guarded or not.\r
575\r
576 @param[in] MemoryType Memory type to check.\r
577 @param[in] AllocateType Allocation type to check.\r
578 @param[in] PageOrPool Indicate a page allocation or pool allocation.\r
579\r
580\r
581 @return TRUE The given type of memory should be guarded.\r
582 @return FALSE The given type of memory should not be guarded.\r
583**/\r
584BOOLEAN\r
585IsMemoryTypeToGuard (\r
586 IN EFI_MEMORY_TYPE MemoryType,\r
587 IN EFI_ALLOCATE_TYPE AllocateType,\r
588 IN UINT8 PageOrPool\r
589 )\r
590{\r
591 UINT64 TestBit;\r
592 UINT64 ConfigBit;\r
e63da9f0 593\r
7fef06af 594 if (AllocateType == AllocateAddress) {\r
e63da9f0
JW
595 return FALSE;\r
596 }\r
597\r
e63da9f0
JW
598 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {\r
599 return FALSE;\r
600 }\r
601\r
602 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {\r
603 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);\r
604 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {\r
605 ConfigBit = PcdGet64 (PcdHeapGuardPageType);\r
606 } else {\r
607 ConfigBit = (UINT64)-1;\r
608 }\r
609\r
610 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {\r
611 TestBit = BIT63;\r
612 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {\r
613 TestBit = BIT62;\r
614 } else if (MemoryType < EfiMaxMemoryType) {\r
615 TestBit = LShiftU64 (1, MemoryType);\r
616 } else if (MemoryType == EfiMaxMemoryType) {\r
617 TestBit = (UINT64)-1;\r
618 } else {\r
619 TestBit = 0;\r
620 }\r
621\r
622 return ((ConfigBit & TestBit) != 0);\r
623}\r
624\r
625/**\r
626 Check to see if the pool at the given address should be guarded or not.\r
627\r
628 @param[in] MemoryType Pool type to check.\r
629\r
630\r
631 @return TRUE The given type of pool should be guarded.\r
632 @return FALSE The given type of pool should not be guarded.\r
633**/\r
634BOOLEAN\r
635IsPoolTypeToGuard (\r
636 IN EFI_MEMORY_TYPE MemoryType\r
637 )\r
638{\r
639 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,\r
640 GUARD_HEAP_TYPE_POOL);\r
641}\r
642\r
643/**\r
644 Check to see if the page at the given address should be guarded or not.\r
645\r
646 @param[in] MemoryType Page type to check.\r
647 @param[in] AllocateType Allocation type to check.\r
648\r
649 @return TRUE The given type of page should be guarded.\r
650 @return FALSE The given type of page should not be guarded.\r
651**/\r
652BOOLEAN\r
653IsPageTypeToGuard (\r
654 IN EFI_MEMORY_TYPE MemoryType,\r
655 IN EFI_ALLOCATE_TYPE AllocateType\r
656 )\r
657{\r
658 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);\r
659}\r
660\r
a6a0a597
JW
661/**\r
662 Check to see if the heap guard is enabled for page and/or pool allocation.\r
663\r
63ebde8e
JW
664 @param[in] GuardType Specify the sub-type(s) of Heap Guard.\r
665\r
a6a0a597
JW
666 @return TRUE/FALSE.\r
667**/\r
668BOOLEAN\r
669IsHeapGuardEnabled (\r
63ebde8e 670 UINT8 GuardType\r
a6a0a597
JW
671 )\r
672{\r
63ebde8e 673 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);\r
a6a0a597
JW
674}\r
675\r
e63da9f0
JW
676/**\r
677 Set head Guard and tail Guard for the given memory range.\r
678\r
679 @param[in] Memory Base address of memory to set guard for.\r
680 @param[in] NumberOfPages Memory size in pages.\r
681\r
682 @return VOID\r
683**/\r
684VOID\r
685SetGuardForMemory (\r
686 IN EFI_PHYSICAL_ADDRESS Memory,\r
687 IN UINTN NumberOfPages\r
688 )\r
689{\r
690 EFI_PHYSICAL_ADDRESS GuardPage;\r
691\r
692 //\r
693 // Set tail Guard\r
694 //\r
695 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
696 if (!IsGuardPage (GuardPage)) {\r
697 SetGuardPage (GuardPage);\r
698 }\r
699\r
700 // Set head Guard\r
701 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
702 if (!IsGuardPage (GuardPage)) {\r
703 SetGuardPage (GuardPage);\r
704 }\r
705\r
706 //\r
707 // Mark the memory range as Guarded\r
708 //\r
709 SetGuardedMemoryBits (Memory, NumberOfPages);\r
710}\r
711\r
712/**\r
713 Unset head Guard and tail Guard for the given memory range.\r
714\r
715 @param[in] Memory Base address of memory to unset guard for.\r
716 @param[in] NumberOfPages Memory size in pages.\r
717\r
718 @return VOID\r
719**/\r
720VOID\r
721UnsetGuardForMemory (\r
722 IN EFI_PHYSICAL_ADDRESS Memory,\r
723 IN UINTN NumberOfPages\r
724 )\r
725{\r
726 EFI_PHYSICAL_ADDRESS GuardPage;\r
6cf0a677 727 UINT64 GuardBitmap;\r
e63da9f0
JW
728\r
729 if (NumberOfPages == 0) {\r
730 return;\r
731 }\r
732\r
733 //\r
734 // Head Guard must be one page before, if any.\r
735 //\r
6cf0a677
JW
736 // MSB-> 1 0 <-LSB\r
737 // -------------------\r
738 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)\r
739 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)\r
740 // 1 X -> Don't free first page (need a new Guard)\r
741 // (it'll be turned into a Guard page later)\r
742 // -------------------\r
743 // Start -> -1 -2\r
744 //\r
e63da9f0 745 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
6cf0a677
JW
746 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);\r
747 if ((GuardBitmap & BIT1) == 0) {\r
748 //\r
749 // Head Guard exists.\r
750 //\r
751 if ((GuardBitmap & BIT0) == 0) {\r
e63da9f0
JW
752 //\r
753 // If the head Guard is not a tail Guard of adjacent memory block,\r
754 // unset it.\r
755 //\r
756 UnsetGuardPage (GuardPage);\r
757 }\r
6cf0a677 758 } else {\r
e63da9f0
JW
759 //\r
760 // Pages before memory to free are still in Guard. It's a partial free\r
761 // case. Turn first page of memory block to free into a new Guard.\r
762 //\r
763 SetGuardPage (Memory);\r
764 }\r
765\r
766 //\r
767 // Tail Guard must be the page after this memory block to free, if any.\r
768 //\r
6cf0a677
JW
769 // MSB-> 1 0 <-LSB\r
770 // --------------------\r
771 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)\r
772 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)\r
773 // X 1 -> Don't free last page (need a new Guard)\r
774 // (it'll be turned into a Guard page later)\r
775 // --------------------\r
776 // +1 +0 <- End\r
777 //\r
e63da9f0 778 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
6cf0a677
JW
779 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);\r
780 if ((GuardBitmap & BIT0) == 0) {\r
781 //\r
782 // Tail Guard exists.\r
783 //\r
784 if ((GuardBitmap & BIT1) == 0) {\r
e63da9f0
JW
785 //\r
786 // If the tail Guard is not a head Guard of adjacent memory block,\r
787 // free it; otherwise, keep it.\r
788 //\r
789 UnsetGuardPage (GuardPage);\r
790 }\r
6cf0a677 791 } else {\r
e63da9f0
JW
792 //\r
793 // Pages after memory to free are still in Guard. It's a partial free\r
794 // case. We need to keep one page to be a head Guard.\r
795 //\r
796 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));\r
797 }\r
798\r
799 //\r
800 // No matter what, we just clear the mark of the Guarded memory.\r
801 //\r
802 ClearGuardedMemoryBits(Memory, NumberOfPages);\r
803}\r
804\r
805/**\r
806 Adjust address of free memory according to existing and/or required Guard.\r
807\r
808 This function will check if there're existing Guard pages of adjacent\r
809 memory blocks, and try to use it as the Guard page of the memory to be\r
810 allocated.\r
811\r
812 @param[in] Start Start address of free memory block.\r
813 @param[in] Size Size of free memory block.\r
814 @param[in] SizeRequested Size of memory to allocate.\r
815\r
816 @return The end address of memory block found.\r
817 @return 0 if no enough space for the required size of memory and its Guard.\r
818**/\r
819UINT64\r
820AdjustMemoryS (\r
821 IN UINT64 Start,\r
822 IN UINT64 Size,\r
823 IN UINT64 SizeRequested\r
824 )\r
825{\r
826 UINT64 Target;\r
827\r
c44218e5
JW
828 //\r
829 // UEFI spec requires that allocated pool must be 8-byte aligned. If it's\r
830 // indicated to put the pool near the Tail Guard, we need extra bytes to\r
831 // make sure alignment of the returned pool address.\r
832 //\r
833 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {\r
834 SizeRequested = ALIGN_VALUE(SizeRequested, 8);\r
835 }\r
836\r
e63da9f0 837 Target = Start + Size - SizeRequested;\r
dd12683e
JW
838 ASSERT (Target >= Start);\r
839 if (Target == 0) {\r
840 return 0;\r
841 }\r
e63da9f0 842\r
e63da9f0
JW
843 if (!IsGuardPage (Start + Size)) {\r
844 // No Guard at tail to share. One more page is needed.\r
845 Target -= EFI_PAGES_TO_SIZE (1);\r
846 }\r
847\r
848 // Out of range?\r
849 if (Target < Start) {\r
850 return 0;\r
851 }\r
852\r
853 // At the edge?\r
854 if (Target == Start) {\r
855 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {\r
856 // No enough space for a new head Guard if no Guard at head to share.\r
857 return 0;\r
858 }\r
859 }\r
860\r
861 // OK, we have enough pages for memory and its Guards. Return the End of the\r
862 // free space.\r
863 return Target + SizeRequested - 1;\r
864}\r
865\r
866/**\r
867 Adjust the start address and number of pages to free according to Guard.\r
868\r
869 The purpose of this function is to keep the shared Guard page with adjacent\r
870 memory block if it's still in guard, or free it if no more sharing. Another\r
871 is to reserve pages as Guard pages in partial page free situation.\r
872\r
873 @param[in,out] Memory Base address of memory to free.\r
874 @param[in,out] NumberOfPages Size of memory to free.\r
875\r
876 @return VOID.\r
877**/\r
878VOID\r
879AdjustMemoryF (\r
880 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
881 IN OUT UINTN *NumberOfPages\r
882 )\r
883{\r
884 EFI_PHYSICAL_ADDRESS Start;\r
885 EFI_PHYSICAL_ADDRESS MemoryToTest;\r
886 UINTN PagesToFree;\r
6cf0a677 887 UINT64 GuardBitmap;\r
e63da9f0
JW
888\r
889 if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {\r
890 return;\r
891 }\r
892\r
893 Start = *Memory;\r
894 PagesToFree = *NumberOfPages;\r
895\r
896 //\r
897 // Head Guard must be one page before, if any.\r
898 //\r
6cf0a677
JW
899 // MSB-> 1 0 <-LSB\r
900 // -------------------\r
901 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)\r
902 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)\r
903 // 1 X -> Don't free first page (need a new Guard)\r
904 // (it'll be turned into a Guard page later)\r
905 // -------------------\r
906 // Start -> -1 -2\r
907 //\r
908 MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);\r
909 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);\r
910 if ((GuardBitmap & BIT1) == 0) {\r
911 //\r
912 // Head Guard exists.\r
913 //\r
914 if ((GuardBitmap & BIT0) == 0) {\r
e63da9f0
JW
915 //\r
916 // If the head Guard is not a tail Guard of adjacent memory block,\r
917 // free it; otherwise, keep it.\r
918 //\r
919 Start -= EFI_PAGES_TO_SIZE (1);\r
920 PagesToFree += 1;\r
921 }\r
6cf0a677 922 } else {\r
e63da9f0 923 //\r
6cf0a677
JW
924 // No Head Guard, and pages before memory to free are still in Guard. It's a\r
925 // partial free case. We need to keep one page to be a tail Guard.\r
e63da9f0
JW
926 //\r
927 Start += EFI_PAGES_TO_SIZE (1);\r
928 PagesToFree -= 1;\r
929 }\r
930\r
931 //\r
932 // Tail Guard must be the page after this memory block to free, if any.\r
933 //\r
6cf0a677
JW
934 // MSB-> 1 0 <-LSB\r
935 // --------------------\r
936 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)\r
937 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)\r
938 // X 1 -> Don't free last page (need a new Guard)\r
939 // (it'll be turned into a Guard page later)\r
940 // --------------------\r
941 // +1 +0 <- End\r
942 //\r
e63da9f0 943 MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);\r
6cf0a677
JW
944 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);\r
945 if ((GuardBitmap & BIT0) == 0) {\r
946 //\r
947 // Tail Guard exists.\r
948 //\r
949 if ((GuardBitmap & BIT1) == 0) {\r
e63da9f0
JW
950 //\r
951 // If the tail Guard is not a head Guard of adjacent memory block,\r
952 // free it; otherwise, keep it.\r
953 //\r
954 PagesToFree += 1;\r
955 }\r
6cf0a677 956 } else if (PagesToFree > 0) {\r
e63da9f0 957 //\r
6cf0a677
JW
958 // No Tail Guard, and pages after memory to free are still in Guard. It's a\r
959 // partial free case. We need to keep one page to be a head Guard.\r
e63da9f0
JW
960 //\r
961 PagesToFree -= 1;\r
962 }\r
963\r
964 *Memory = Start;\r
965 *NumberOfPages = PagesToFree;\r
966}\r
967\r
968/**\r
969 Adjust the base and number of pages to really allocate according to Guard.\r
970\r
971 @param[in,out] Memory Base address of free memory.\r
972 @param[in,out] NumberOfPages Size of memory to allocate.\r
973\r
974 @return VOID.\r
975**/\r
976VOID\r
977AdjustMemoryA (\r
978 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
979 IN OUT UINTN *NumberOfPages\r
980 )\r
981{\r
982 //\r
983 // FindFreePages() has already taken the Guard into account. It's safe to\r
984 // adjust the start address and/or number of pages here, to make sure that\r
985 // the Guards are also "allocated".\r
986 //\r
987 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {\r
988 // No tail Guard, add one.\r
989 *NumberOfPages += 1;\r
990 }\r
991\r
992 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {\r
993 // No head Guard, add one.\r
994 *Memory -= EFI_PAGE_SIZE;\r
995 *NumberOfPages += 1;\r
996 }\r
997}\r
998\r
999/**\r
1000 Adjust the pool head position to make sure the Guard page is adjavent to\r
1001 pool tail or pool head.\r
1002\r
1003 @param[in] Memory Base address of memory allocated.\r
1004 @param[in] NoPages Number of pages actually allocated.\r
1005 @param[in] Size Size of memory requested.\r
1006 (plus pool head/tail overhead)\r
1007\r
1008 @return Address of pool head.\r
1009**/\r
1010VOID *\r
1011AdjustPoolHeadA (\r
1012 IN EFI_PHYSICAL_ADDRESS Memory,\r
1013 IN UINTN NoPages,\r
1014 IN UINTN Size\r
1015 )\r
1016{\r
c44218e5 1017 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1018 //\r
1019 // Pool head is put near the head Guard\r
1020 //\r
1021 return (VOID *)(UINTN)Memory;\r
1022 }\r
1023\r
1024 //\r
1025 // Pool head is put near the tail Guard\r
1026 //\r
c44218e5 1027 Size = ALIGN_VALUE (Size, 8);\r
e63da9f0
JW
1028 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);\r
1029}\r
1030\r
1031/**\r
1032 Get the page base address according to pool head address.\r
1033\r
1034 @param[in] Memory Head address of pool to free.\r
1035\r
1036 @return Address of pool head.\r
1037**/\r
1038VOID *\r
1039AdjustPoolHeadF (\r
1040 IN EFI_PHYSICAL_ADDRESS Memory\r
1041 )\r
1042{\r
c44218e5 1043 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1044 //\r
1045 // Pool head is put near the head Guard\r
1046 //\r
1047 return (VOID *)(UINTN)Memory;\r
1048 }\r
1049\r
1050 //\r
1051 // Pool head is put near the tail Guard\r
1052 //\r
1053 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);\r
1054}\r
1055\r
/**
  Allocate or free guarded memory.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @return EFI_SUCCESS         The whole range was consumed by Guard pages and
                              nothing was left to convert.
  @return Others              Status of CoreConvertPages() on the adjusted
                              range.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    //
    // Converting to EfiConventionalMemory means freeing. Remember the
    // original range before AdjustMemoryF() shrinks it to exclude the
    // Guard page(s) at head and/or tail.
    //
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    //
    // AdjustMemoryF() may have consumed the entire range (it was all Guard
    // pages); there is nothing left to convert.
    //
    if (NumberOfPages == 0) {
      return EFI_SUCCESS;
    }
  } else {
    //
    // Allocating: expand the range so the conversion also covers the Guard
    // page(s) surrounding the requested memory.
    //
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1097\r
/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     Index;
  BOOLEAN   OnGuarding;

  //
  // Nothing to do if no guarded memory has been tracked yet or the map
  // level is out of the supported range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // A map shallower than the maximum depth starts at a deeper slot of the
  // fixed-size per-level arrays; begin the walk at the top level in use.
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  //
  // Iterative depth-first traversal of the multi-level bitmap. Tables[],
  // Indices[] and Addresses[] act as an explicit stack of the current table
  // pointer, entry index and base address per level.
  //
  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back up to the parent level.
      //
      Tables[Level] = 0;
      Level -= 1;
    } else {

      TableEntry  = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {

        //
        // Empty sub-range: any guarded run has ended before it.
        //
        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Non-leaf entry: descend into the child table it points to.
        //
        Level += 1;
        Tables[Level] = TableEntry;
        Addresses[Level] = Address;
        Indices[Level] = 0;

        continue;

      } else {

        //
        // Leaf bitmap: walk bit by bit. A 0->1 transition marks the page
        // just before the run as a head Guard; a 1->0 transition marks the
        // page at the transition as a tail Guard.
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          //
          // All remaining bits are zero; no more transitions in this entry.
          //
          if (TableEntry == 0) {
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address += EFI_PAGE_SIZE;
          Index += 1;
        }
      }
    }

    //
    // Popped above the starting level: the whole map has been visited.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at the current level and recompute the base
    // address of the range that entry covers.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1207\r
/**
  Find the address of top-most guarded free page.

  @param[out]  Address    Start address of top-most guarded free page.

  @return VOID.
**/
VOID
GetLastGuardedFreePageAddress (
  OUT EFI_PHYSICAL_ADDRESS      *Address
  )
{
  EFI_PHYSICAL_ADDRESS    AddressGranularity;
  EFI_PHYSICAL_ADDRESS    BaseAddress;
  UINTN                   Level;
  UINT64                  Map;
  INTN                    Index;

  ASSERT (mMapLevel >= 1);

  //
  // Walk down the bitmap levels, always following the highest-indexed
  // (i.e. highest-address) non-NULL entry, accumulating the base address
  // covered by each chosen entry.
  //
  BaseAddress = 0;
  Map = mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {
    //
    // Size of the address range covered by one entry at this level.
    //
    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);

    //
    // Find the non-NULL entry at largest index.
    //
    for (Index = (INTN)mLevelMask[Level]; Index >= 0 ; --Index) {
      if (((UINT64 *)(UINTN)Map)[Index] != 0) {
        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
        Map = ((UINT64 *)(UINTN)Map)[Index];
        break;
      }
    }
  }

  //
  // Find the non-zero MSB then get the page address.
  // Map now holds the leaf bitmap; each right shift accounts for one page,
  // leaving BaseAddress just past the highest set bit.
  //
  while (Map != 0) {
    Map = RShiftU64 (Map, 1);
    BaseAddress += EFI_PAGES_TO_SIZE (1);
  }

  *Address = BaseAddress;
}
1257\r
/**
  Record freed pages.

  Thin wrapper: freed pages are tracked with the same bitmap used for
  guarded memory, so marking a page "freed" is simply setting its bits.

  @param[in]  BaseAddress   Base address of just freed pages.
  @param[in]  Pages         Number of freed pages.

  @return VOID.
**/
VOID
MarkFreedPages (
  IN EFI_PHYSICAL_ADDRESS    BaseAddress,
  IN UINTN                   Pages
  )
{
  SetGuardedMemoryBits (BaseAddress, Pages);
}
1274\r
/**
  Record freed pages as well as mark them as not-present.

  @param[in]  BaseAddress   Base address of just freed pages.
  @param[in]  Pages         Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPages (
  IN  EFI_PHYSICAL_ADDRESS    BaseAddress,
  IN  UINTN                   Pages
  )
{
  EFI_STATUS      Status;

  //
  // Legacy memory lower than 1MB might be accessed with no allocation. Leave
  // them alone.
  //
  if (BaseAddress < BASE_1MB) {
    return;
  }

  MarkFreedPages (BaseAddress, Pages);
  //
  // The pages can only be made not-present once the CPU Arch Protocol is
  // available; before that they are just recorded in the bitmap above.
  //
  if (gCpu != NULL) {
    //
    // Set flag to make sure allocating memory without GUARD for page table
    // operation; otherwise infinite loops could be caused.
    //
    mOnGuarding = TRUE;
    //
    // Note: This might overwrite other attributes needed by other features,
    // such as NX memory protection.
    //
    Status = gCpu->SetMemoryAttributes (
                     gCpu,
                     BaseAddress,
                     EFI_PAGES_TO_SIZE (Pages),
                     EFI_MEMORY_RP
                     );
    //
    // Normally we should ASSERT the returned Status. But there might be memory
    // alloc/free involved in SetMemoryAttributes(), which might fail this
    // calling. It's rare case so it's OK to let a few tiny holes be not-guarded.
    //
    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));
    }
    mOnGuarding = FALSE;
  }
}
1328\r
1329/**\r
1330 Record freed pages as well as mark them as not-present, if enabled.\r
1331\r
1332 @param[in] BaseAddress Base address of just freed pages.\r
1333 @param[in] Pages Number of freed pages.\r
1334\r
1335 @return VOID.\r
1336**/\r
1337VOID\r
1338EFIAPI\r
1339GuardFreedPagesChecked (\r
1340 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
1341 IN UINTN Pages\r
1342 )\r
1343{\r
1344 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
1345 GuardFreedPages (BaseAddress, Pages);\r
1346 }\r
1347}\r
1348\r
/**
  Mark all pages freed before CPU Arch Protocol as not-present.

**/
VOID
GuardAllFreedPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINT64    BitIndex;
  UINTN     GuardPageNumber;

  //
  // Nothing to do if no freed pages have been recorded yet or the map
  // level is out of the supported range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // Iterative depth-first walk of the multi-level bitmap (same scheme as
  // SetAllGuardPages). GuardPage/GuardPageNumber accumulate the current
  // contiguous run of freed pages; (UINT64)-1 means "no run in progress".
  //
  Level           = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level]   = mGuardedMemoryMap;
  Address         = 0;
  GuardPage       = (UINT64)-1;
  GuardPageNumber = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back up to the parent level.
      //
      Tables[Level] = 0;
      Level -= 1;
    } else {
      TableEntry  = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address     = Addresses[Level];

      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Non-leaf entry: descend into the child table it points to.
        //
        Level += 1;
        Tables[Level] = TableEntry;
        Addresses[Level] = Address;
        Indices[Level] = 0;

        continue;
      } else {
        //
        // Leaf bitmap: scan bit by bit, batching consecutive set bits into
        // one GuardFreedPages() call when the run ends.
        //
        BitIndex = 1;
        while (BitIndex != 0) {
          if ((TableEntry & BitIndex) != 0) {
            if (GuardPage == (UINT64)-1) {
              GuardPage = Address;
            }
            ++GuardPageNumber;
          } else if (GuardPageNumber > 0) {
            GuardFreedPages (GuardPage, GuardPageNumber);
            GuardPageNumber = 0;
            GuardPage       = (UINT64)-1;
          }

          //
          // All remaining bits are zero; skip to the next entry. Any open
          // run is carried over and flushed by a later zero bit or at the
          // end of the walk.
          //
          if (TableEntry == 0) {
            break;
          }

          Address += EFI_PAGES_TO_SIZE (1);
          BitIndex = LShiftU64 (BitIndex, 1);
        }
      }
    }

    //
    // Popped above the starting level: the whole map has been visited.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at the current level and recompute the base
    // address of the range that entry covers.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);

  }

  //
  // Update the maximum address of freed page which can be used for memory
  // promotion upon out-of-memory-space.
  //
  GetLastGuardedFreePageAddress (&Address);
  if (Address != 0) {
    mLastPromotedPage = Address;
  }
}
1447\r
/**
  This function checks to see if the given memory map descriptor in a memory map
  can be merged with any guarded free pages.

  @param  MemoryMapEntry    A pointer to a descriptor in MemoryMap.
  @param  MaxAddress        Maximum address to stop the merge.

  @return VOID

**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR      *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS       MaxAddress
  )
{
  EFI_PHYSICAL_ADDRESS        EndAddress;
  UINT64                      Bitmap;
  INTN                        Pages;

  //
  // Only merge when the freed-memory guard is active and the descriptor is
  // a memory (not MMIO/port) type.
  //
  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
      MemoryMapEntry->Type >= EfiMemoryMappedIO) {
    return;
  }

  Bitmap = 0;
  //
  // Upper bound on how many pages this descriptor could grow by: the gap
  // between its current end and MaxAddress.
  //
  Pages  = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));
  Pages -= (INTN)MemoryMapEntry->NumberOfPages;
  while (Pages > 0) {
    if (Bitmap == 0) {
      //
      // Refill: fetch the next 64 guard-map bits starting at the current
      // end of the descriptor.
      //
      EndAddress = MemoryMapEntry->PhysicalStart +
                   EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);
      Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    //
    // Stop at the first page that is not a guarded freed page.
    //
    if ((Bitmap & 1) == 0) {
      break;
    }

    //
    // Absorb the guarded freed page into this descriptor.
    //
    Pages--;
    MemoryMapEntry->NumberOfPages++;
    Bitmap = RShiftU64 (Bitmap, 1);
  }
}
1492\r
/**
  Put part (at most 64 pages a time) guarded free pages back to free page pool.

  Freed memory guard is used to detect Use-After-Free (UAF) memory issue, which
  makes use of 'Used then throw away' way to detect any illegal access to freed
  memory. The thrown-away memory will be marked as not-present so that any access
  to those memory (after free) will be caught by page-fault exception.

  The problem is that this will consume lots of memory space. Once no memory
  left in pool to allocate, we have to restore part of the freed pages to their
  normal function. Otherwise the whole system will stop functioning.

  @param  StartAddress    Start address of promoted memory.
  @param  EndAddress      End address of promoted memory.

  @return TRUE    Succeeded to promote memory.
  @return FALSE   No free memory found.

**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS      *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS      *EndAddress
  )
{
  EFI_STATUS              Status;
  UINTN                   AvailablePages;
  UINT64                  Bitmap;
  EFI_PHYSICAL_ADDRESS    Start;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    return FALSE;
  }

  //
  // Similar to memory allocation service, always search the freed pages in
  // descending direction.
  //
  Start           = mLastPromotedPage;
  AvailablePages  = 0;
  while (AvailablePages == 0) {
    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    //
    // If the address wraps around, try the really freed pages at top.
    // (Unsigned underflow makes Start larger than mLastPromotedPage.)
    //
    if (Start > mLastPromotedPage) {
      GetLastGuardedFreePageAddress (&Start);
      ASSERT (Start != 0);
      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    //
    // Scan one 64-bit chunk of the guard bitmap: skip leading un-freed
    // pages, then count the first contiguous run of freed (set) bits.
    //
    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
    while (Bitmap > 0) {
      if ((Bitmap & 1) != 0) {
        ++AvailablePages;
      } else if (AvailablePages == 0) {
        Start += EFI_PAGES_TO_SIZE (1);
      } else {
        break;
      }

      Bitmap = RShiftU64 (Bitmap, 1);
    }
  }

  if (AvailablePages != 0) {
    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
    ClearGuardedMemoryBits (Start, AvailablePages);

    if (gCpu != NULL) {
      //
      // Set flag to make sure allocating memory without GUARD for page table
      // operation; otherwise infinite loops could be caused.
      //
      mOnGuarding = TRUE;
      //
      // Restore the pages to normal (present) attributes so they can be
      // allocated again.
      //
      Status = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE(AvailablePages), 0);
      ASSERT_EFI_ERROR (Status);
      mOnGuarding = FALSE;
    }

    mLastPromotedPage = Start;
    *StartAddress     = Start;
    *EndAddress       = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
    return TRUE;
  }

  return FALSE;
}
1581\r
7fef06af
JW
1582/**\r
1583 Notify function used to set all Guard pages before CPU Arch Protocol installed.\r
1584**/\r
1585VOID\r
1586HeapGuardCpuArchProtocolNotify (\r
1587 VOID\r
1588 )\r
1589{\r
1590 ASSERT (gCpu != NULL);\r
63ebde8e
JW
1591\r
1592 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&\r
1593 IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
1594 DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));\r
1595 CpuDeadLoop ();\r
1596 }\r
1597\r
1598 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {\r
1599 SetAllGuardPages ();\r
1600 }\r
1601\r
1602 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
1603 GuardAllFreedPages ();\r
1604 }\r
7fef06af
JW
1605}\r
1606\r
e63da9f0
JW
1607/**\r
1608 Helper function to convert a UINT64 value in binary to a string.\r
1609\r
1610 @param[in] Value Value of a UINT64 integer.\r
1611 @param[out] BinString String buffer to contain the conversion result.\r
1612\r
1613 @return VOID.\r
1614**/\r
1615VOID\r
1616Uint64ToBinString (\r
1617 IN UINT64 Value,\r
1618 OUT CHAR8 *BinString\r
1619 )\r
1620{\r
1621 UINTN Index;\r
1622\r
1623 if (BinString == NULL) {\r
1624 return;\r
1625 }\r
1626\r
1627 for (Index = 64; Index > 0; --Index) {\r
1628 BinString[Index - 1] = '0' + (Value & 1);\r
1629 Value = RShiftU64 (Value, 1);\r
1630 }\r
1631 BinString[64] = '\0';\r
1632}\r
1633\r
/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {
    return;
  }

  //
  // Nothing to dump if no memory has been tracked yet or the map level is
  // out of the supported range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Header rulers labelling the 64 bit columns of each printed row.
  //
  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof(Indices), 0);
  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);

  //
  // Iterative depth-first walk of the multi-level bitmap (same scheme as
  // SetAllGuardPages). RepeatZero collapses runs of all-zero leaf entries
  // into a single "..." line.
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {

      //
      // Current table exhausted; pop back up and print a separator.
      //
      Tables[Level] = 0;
      Level -= 1;
      RepeatZero = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry  = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {

        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          //
          // Print the first zero row in full, "..." for the second, and
          // suppress the rest of the run.
          //
          if (RepeatZero == 0) {
            Uint64ToBinString(TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Non-leaf entry: descend into the child table it points to.
        //
        Level += 1;
        Tables[Level] = TableEntry;
        Addresses[Level] = Address;
        Indices[Level] = 0;
        RepeatZero = 0;

        continue;

      } else {

        //
        // Non-zero leaf: print its 64 bits as a binary string.
        //
        RepeatZero = 0;
        Uint64ToBinString(TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    //
    // Popped above the starting level: the whole map has been visited.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at the current level and recompute the base
    // address of the range that entry covers.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1746\r