/** @file
  UEFI Heap Guard functions.

Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "DxeMain.h"
#include "Imem.h"
#include "HeapGuard.h"

//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need to allocate pages for new PDE/PTE.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to table tracking the Guarded memory with bitmap, in which '1'
// is used to indicate memory guarded. '0' might be free memory or a Guard
// page itself, depending on the status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed to by mGuardedMemoryMap.
// mMapLevel must be initialized to at least 1. It will be automatically
// updated according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;

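//
// Note: each level of the map table is indexed by one slice of the physical
// address, extracted as
//   Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]) & mLevelMask[Level];
// (see FindGuardedMemoryMap() below); the bottom level is a bitmap holding
// one bit per page.
//
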
//
// Used for promoting freed but not used pages.
//
GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage = BASE_4GB;

/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]    Address     Start address to set for.
  @param[in]    BitNumber   Number of bits to set.
  @param[in]    BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

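  //
  // Split the run of bits into three parts: Msbs bits that finish the first
  // (partial) qword, Qwords whole qwords filled via SetMem64(), and Lsbs
  // bits at the start of the last (partial) qword.
  //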
  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}

/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]    Address     Start address to set for.
  @param[in]    BitNumber   Number of bits to clear.
  @param[in]    BitMap      Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}

/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at the given
  Address. No more than 64 bits can be retrieved in one call.

  @param[in]    Address     Start address to retrieve bits for.
  @param[in]    BitNumber   Number of bits to get.
  @param[in]    BitMap      Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    Result = *BitMap;
  } else {
    Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);
    if (Lsbs > 0) {
      BitMap  += 1;
      Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}

/**
  Locate the pointer to the bitmap, in the guarded memory bitmap tables,
  which covers the given Address.

  @param[in]    Address       Start address to search the bitmap for.
  @param[in]    AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out]   BitMap        Pointer to bitmap which covers the Address.

  @return The number of bits from the given Address to the end of the current
          map unit.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;
  EFI_STATUS              Status;

  //
  // Adjust current map table depth according to the address to access
  //
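  // (Each time the table is deepened, a new root is allocated and the old
  // root is linked in at index 0, since the old table covers the low end of
  // the wider address range.)
  //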
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;
  }

  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index    &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));
  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap       = GuardMap;

  return BitsToUnitEnd;
}

/**
  Set corresponding bits in bitmap table to 1 according to given memory range.

  @param[in]    Address        Memory address to guard from.
  @param[in]    NumberOfPages  Number of pages to guard.

  @return VOID.
**/
VOID
EFIAPI
SetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   NumberOfPages
  )
{
  UINT64            *BitMap;
  UINTN             Bits;
  UINTN             BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    SetBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Clear corresponding bits in bitmap table according to given memory range.

  @param[in]    Address        Memory address to unset from.
  @param[in]    NumberOfPages  Number of pages to unset guard for.

  @return VOID.
**/
VOID
EFIAPI
ClearGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   NumberOfPages
  )
{
  UINT64            *BitMap;
  UINTN             Bits;
  UINTN             BitsToUnitEnd;

  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);
    ASSERT (BitMap != NULL);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    ClearBits (Address, Bits, BitMap);

    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }
}

/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  @param[in]    Address        Memory address to retrieve from.
  @param[in]    NumberOfPages  Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap, in which bit 0
          corresponds to the page at Address.
**/
UINT64
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   NumberOfPages
  )
{
  UINT64            *BitMap;
  UINTN             Bits;
  UINT64            Result;
  UINTN             Shift;
  UINTN             BitsToUnitEnd;

  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift  = 0;
  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    if (BitMap != NULL) {
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift         += Bits;
    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}

/**
  Get the bit value in the bitmap table for the given address.

  @param[in]    Address     The address to retrieve the bit for.

  @return 1 or 0.
**/
UINTN
EFIAPI
GetGuardMapBit (
  IN EFI_PHYSICAL_ADDRESS    Address
  )
{
  UINT64        *GuardMap;

  FindGuardedMemoryMap (Address, FALSE, &GuardMap);
  if (GuardMap != NULL) {
    if (RShiftU64 (*GuardMap,
                   GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {
      return 1;
    }
  }

  return 0;
}

/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]    Address     The address to check for.

  @return TRUE  The page at Address is a Guard page.
  @return FALSE The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS    Address
  )
{
  UINT64          BitMap;

  //
  // There must be at least one guarded page before and/or after the given
  // address if it's a Guard page. The bitmap pattern should be one of
  // 001, 100 and 101.
  //
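  // (In the 3-bit value below, bit 0 is the page before Address, bit 1 is
  // Address itself, and bit 2 is the page after it; a Guard page is never
  // marked in the bitmap, so bit 1 must be 0.)
  //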
  BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);
  return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));
}

/**
  Check to see if the page at the given address is guarded or not.

  @param[in]    Address     The address to check for.

  @return TRUE  The page at Address is guarded.
  @return FALSE The page at Address is not guarded.
**/
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS    Address
  )
{
  return (GetGuardMapBit (Address) == 1);
}

/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT.

  @param[in]    BaseAddress     Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
SetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  EFI_STATUS      Status;

  if (gCpu == NULL) {
    return;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as NX memory protection.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}

/**
  Unset the Guard page at the given address, returning it to normal memory.

  This is done by changing the page table attribute to be PRESENT.

  @param[in]    BaseAddress     Page address to un-Guard.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  UINT64          Attributes;
  EFI_STATUS      Status;

  if (gCpu == NULL) {
    return;
  }

  //
  // Once the Guard page is unset, it will be freed back to memory pool. NX
  // memory protection must be restored for this page if NX is enabled for
  // free memory.
  //
  Attributes = 0;
  if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
    Attributes |= EFI_MEMORY_XP;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as memory protection (NX). Please make sure they are not enabled
  // at the same time.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}

/**
  Check to see if the memory at the given address should be guarded or not.

  @param[in]    MemoryType      Memory type to check.
  @param[in]    AllocateType    Allocation type to check.
  @param[in]    PageOrPool      Indicate a page allocation or pool allocation.

  @return TRUE  The given type of memory should be guarded.
  @return FALSE The given type of memory should not be guarded.
**/
BOOLEAN
IsMemoryTypeToGuard (
  IN EFI_MEMORY_TYPE        MemoryType,
  IN EFI_ALLOCATE_TYPE      AllocateType,
  IN UINT8                  PageOrPool
  )
{
  UINT64 TestBit;
  UINT64 ConfigBit;

  if (AllocateType == AllocateAddress) {
    return FALSE;
  }

  if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {
    return FALSE;
  }

  if (PageOrPool == GUARD_HEAP_TYPE_POOL) {
    ConfigBit = PcdGet64 (PcdHeapGuardPoolType);
  } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {
    ConfigBit = PcdGet64 (PcdHeapGuardPageType);
  } else {
    ConfigBit = (UINT64)-1;
  }

  if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {
    TestBit = BIT63;
  } else if ((UINT32)MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {
    TestBit = BIT62;
  } else if (MemoryType < EfiMaxMemoryType) {
    TestBit = LShiftU64 (1, MemoryType);
  } else if (MemoryType == EfiMaxMemoryType) {
    TestBit = (UINT64)-1;
  } else {
    TestBit = 0;
  }

  return ((ConfigBit & TestBit) != 0);
}
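
//
// Example for IsMemoryTypeToGuard() (illustrative): with GUARD_HEAP_TYPE_PAGE
// set in PcdHeapGuardPropertyMask and PcdHeapGuardPageType = BIT4, page
// allocations of type EfiBootServicesData (numeric value 4) are guarded,
// while allocations of other memory types are left alone.
//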

/**
  Check to see if the pool at the given address should be guarded or not.

  @param[in]    MemoryType      Pool type to check.

  @return TRUE  The given type of pool should be guarded.
  @return FALSE The given type of pool should not be guarded.
**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE        MemoryType
  )
{
  return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,
                              GUARD_HEAP_TYPE_POOL);
}

/**
  Check to see if the page at the given address should be guarded or not.

  @param[in]    MemoryType      Page type to check.
  @param[in]    AllocateType    Allocation type to check.

  @return TRUE  The given type of page should be guarded.
  @return FALSE The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE        MemoryType,
  IN EFI_ALLOCATE_TYPE      AllocateType
  )
{
  return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);
}

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @param[in]    GuardType   Specify the sub-type(s) of Heap Guard.

  @return TRUE if any of the specified Heap Guard sub-types is enabled;
          FALSE otherwise.
**/
BOOLEAN
IsHeapGuardEnabled (
  UINT8           GuardType
  )
{
  return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);
}

/**
  Set head Guard and tail Guard for the given memory range.

  @param[in]    Memory          Base address of memory to set guard for.
  @param[in]    NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS    GuardPage;

  //
  // Set tail Guard
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  //
  // Set head Guard
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  if (!IsGuardPage (GuardPage)) {
    SetGuardPage (GuardPage);
  }

  //
  // Mark the memory range as Guarded
  //
  SetGuardedMemoryBits (Memory, NumberOfPages);
}
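
//
// Resulting layout after SetGuardForMemory() (illustrative):
//
//   [head Guard]  [page 0] [page 1] ... [page N-1]   [tail Guard]
//   not present   present, bits set to 1 in bitmap   not present
//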

/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]    Memory          Base address of memory to unset guard for.
  @param[in]    NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard too    (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage   = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of the adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before the memory to free are still guarded. It's a partial free
    // case. Turn the first page of the memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard too    (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                  (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage   = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of the adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after the memory to free are still guarded. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits (Memory, NumberOfPages);
}

/**
  Adjust the address of free memory according to existing and/or required
  Guard.

  This function will check if there are existing Guard pages of adjacent
  memory blocks, and try to use them as the Guard pages of the memory to be
  allocated.

  @param[in]    Start           Start address of free memory block.
  @param[in]    Size            Size of free memory block.
  @param[in]    SizeRequested   Size of memory to allocate.

  @return The end address of the memory block found.
  @return 0 if there is not enough space for the required size of memory and
          its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE (SizeRequested, 8);
  }

  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // Not enough space for a new head Guard if there's no Guard at the
      // head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of
  // the free space.
  return Target + SizeRequested - 1;
}
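
//
// Worked example for AdjustMemoryS() (illustrative): Start = 0x1000,
// Size = 0x5000 (5 pages) and SizeRequested = 0x2000 (2 pages) give
// Target = 0x4000. If there is no Guard page at Start + Size (0x6000) to
// share, Target moves down one page to 0x3000 so that page 0x5000 can become
// the tail Guard; the returned end address is then
// Target + SizeRequested - 1 = 0x4FFF.
//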

/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with the
  adjacent memory block if it's still guarded, or free it if it's no longer
  shared. Another purpose is to reserve pages as Guard pages in a partial
  page free situation.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size in pages of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start       = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard too    (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of the adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before the memory to free are still guarded.
    // It's a partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard too    (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                  (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of the adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after the memory to free are still guarded.
    // It's a partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}

/**
  Adjust the base and number of pages to really allocate according to Guard.

  @param[in,out]  Memory          Base address of free memory.
  @param[in,out]  NumberOfPages   Size in pages of memory to allocate.

  @return VOID.
**/
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  //
  // FindFreePages() has already taken the Guard into account. It's safe to
  // adjust the start address and/or number of pages here, to make sure that
  // the Guards are also "allocated".
  //
  if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {
    // No tail Guard, add one.
    *NumberOfPages += 1;
  }

  if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {
    // No head Guard, add one.
    *Memory        -= EFI_PAGE_SIZE;
    *NumberOfPages += 1;
  }
}

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  the pool tail or pool head.

  @param[in]    Memory    Base address of memory allocated.
  @param[in]    NoPages   Number of pages actually allocated.
  @param[in]    Size      Size of memory requested.
                          (plus pool head/tail overhead)

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS    Memory,
  IN UINTN                   NoPages,
  IN UINTN                   Size
  )
{
  if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  Size = ALIGN_VALUE (Size, 8);
  return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);
}
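
//
// Design note (as implemented above and in AdjustPoolHeadF() below): BIT7 of
// PcdHeapGuardPropertyMask picks the Guard direction for pool allocations.
// When it is set, the pool head stays at the page base next to the head
// Guard; when it is clear, the pool is shifted up against the tail Guard
// (keeping 8-byte alignment), so buffer overruns past the pool tail hit the
// Guard page immediately.
//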

/**
  Get the page base address according to pool head address.

  @param[in]    Memory    Head address of pool to free.

  @return Page base address of the pool.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS    Memory
  )
{
  if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {
    //
    // Pool head is put near the head Guard
    //
    return (VOID *)(UINTN)Memory;
  }

  //
  // Pool head is put near the tail Guard
  //
  return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);
}

/**
  Allocate or free guarded memory.

  @param[in]    Start           Start address of memory to allocate or free.
  @param[in]    NumberOfPages   Memory size in pages.
  @param[in]    NewType         Memory type to convert to.

  @return The status returned by CoreConvertPages().
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset the Guard page inside the memory lock because there
    // should be no memory allocation occurring in updating memory page
    // attributes at this point. And unsetting the Guard page before the free
    // will prevent a Guard page just freed back to the pool from being
    // allocated right away before it is marked usable (from not-present to
    // present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      return EFI_SUCCESS;
    }
  } else {
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}

/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     Index;
  BOOLEAN   OnGuarding;

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

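  //
  // The loop below walks the multi-level map table iteratively rather than
  // recursively: Indices[] tracks the current position at each level, and
  // Addresses[] the partial address decoded so far.
  //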
  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        OnGuarding = FALSE;
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level           += 1;
        Tables[Level]    = TableEntry;
        Addresses[Level] = Address;
        Indices[Level]   = 0;

        continue;
      } else {
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address   += EFI_PAGE_SIZE;
          Index     += 1;
        }
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}

/**
  Find the address of the top-most guarded free page.

  @param[out]   Address   Start address of the top-most guarded free page.

  @return VOID.
**/
VOID
GetLastGuardedFreePageAddress (
  OUT EFI_PHYSICAL_ADDRESS      *Address
  )
{
  EFI_PHYSICAL_ADDRESS    AddressGranularity;
  EFI_PHYSICAL_ADDRESS    BaseAddress;
  UINTN                   Level;
  UINT64                  Map;
  INTN                    Index;

  ASSERT (mMapLevel >= 1);

  BaseAddress = 0;
  Map         = mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {
    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);

    //
    // Find the non-NULL entry at largest index.
    //
    for (Index = (INTN)mLevelMask[Level]; Index >= 0; --Index) {
      if (((UINT64 *)(UINTN)Map)[Index] != 0) {
        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
        Map          = ((UINT64 *)(UINTN)Map)[Index];
        break;
      }
    }
  }

  //
  // Find the non-zero MSB then get the page address.
  //
  while (Map != 0) {
    Map          = RShiftU64 (Map, 1);
    BaseAddress += EFI_PAGES_TO_SIZE (1);
  }

  *Address = BaseAddress;
}

/**
  Record freed pages.

  @param[in]    BaseAddress   Base address of just freed pages.
  @param[in]    Pages         Number of freed pages.

  @return VOID.
**/
VOID
MarkFreedPages (
  IN EFI_PHYSICAL_ADDRESS     BaseAddress,
  IN UINTN                    Pages
  )
{
  SetGuardedMemoryBits (BaseAddress, Pages);
}

/**
  Record freed pages as well as mark them as not-present.

  @param[in]    BaseAddress   Base address of just freed pages.
  @param[in]    Pages         Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPages (
  IN  EFI_PHYSICAL_ADDRESS    BaseAddress,
  IN  UINTN                   Pages
  )
{
  EFI_STATUS      Status;

  //
  // Legacy memory lower than 1MB might be accessed with no allocation. Leave
  // it alone.
  //
  if (BaseAddress < BASE_1MB) {
    return;
  }

  MarkFreedPages (BaseAddress, Pages);
  if (gCpu != NULL) {
    //
    // Set flag to make sure allocating memory without GUARD for page table
    // operation; otherwise infinite loops could be caused.
    //
    mOnGuarding = TRUE;
    //
    // Note: This might overwrite other attributes needed by other features,
    // such as NX memory protection.
    //
    Status = gCpu->SetMemoryAttributes (
                     gCpu,
                     BaseAddress,
                     EFI_PAGES_TO_SIZE (Pages),
                     EFI_MEMORY_RP
                     );
    //
    // Normally we should ASSERT the returned Status. But there might be
    // memory alloc/free involved in SetMemoryAttributes(), which might fail
    // this call. It's a rare case, so it's OK to leave a few tiny holes
    // unguarded.
    //
    if (EFI_ERROR (Status)) {
      DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));
    }
    mOnGuarding = FALSE;
  }
}

/**
  Record freed pages as well as mark them as not-present, if enabled.

  @param[in]    BaseAddress   Base address of just freed pages.
  @param[in]    Pages         Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPagesChecked (
  IN  EFI_PHYSICAL_ADDRESS    BaseAddress,
  IN  UINTN                   Pages
  )
{
  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    GuardFreedPages (BaseAddress, Pages);
  }
}

/**
  Mark all pages freed before CPU Arch Protocol as not-present.
**/
VOID
GuardAllFreedPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINT64    BitIndex;
  UINTN     GuardPageNumber;

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);
  SetMem (Indices, sizeof (Indices), 0);

  Level           = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level]   = mGuardedMemoryMap;
  Address         = 0;
  GuardPage       = (UINT64)-1;
  GuardPageNumber = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address    = Addresses[Level];

      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level           += 1;
        Tables[Level]    = TableEntry;
        Addresses[Level] = Address;
        Indices[Level]   = 0;

        continue;
      } else {
        BitIndex = 1;
        while (BitIndex != 0) {
          if ((TableEntry & BitIndex) != 0) {
            if (GuardPage == (UINT64)-1) {
              GuardPage = Address;
            }
            ++GuardPageNumber;
          } else if (GuardPageNumber > 0) {
            GuardFreedPages (GuardPage, GuardPageNumber);
            GuardPageNumber = 0;
            GuardPage       = (UINT64)-1;
          }

          if (TableEntry == 0) {
            break;
          }

          Address += EFI_PAGES_TO_SIZE (1);
          BitIndex = LShiftU64 (BitIndex, 1);
        }
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }

  //
  // Update the maximum address of freed page which can be used for memory
  // promotion upon out-of-memory-space.
  //
  GetLastGuardedFreePageAddress (&Address);
  if (Address != 0) {
    mLastPromotedPage = Address;
  }
}

/**
  This function checks to see if the given memory map descriptor in a memory
  map can be merged with any guarded free pages.

  @param[in]    MemoryMapEntry    A pointer to a descriptor in MemoryMap.
  @param[in]    MaxAddress        Maximum address to stop the merge.

  @return VOID.
**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR      *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS       MaxAddress
  )
{
  EFI_PHYSICAL_ADDRESS        EndAddress;
  UINT64                      Bitmap;
  INTN                        Pages;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
      MemoryMapEntry->Type >= EfiMemoryMappedIO) {
    return;
  }

  Bitmap = 0;
  Pages  = EFI_SIZE_TO_PAGES ((UINTN)(MaxAddress - MemoryMapEntry->PhysicalStart));
  Pages -= (INTN)MemoryMapEntry->NumberOfPages;
  while (Pages > 0) {
    if (Bitmap == 0) {
      EndAddress = MemoryMapEntry->PhysicalStart +
                   EFI_PAGES_TO_SIZE ((UINTN)MemoryMapEntry->NumberOfPages);
      Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    if ((Bitmap & 1) == 0) {
      break;
    }

    Pages--;
    MemoryMapEntry->NumberOfPages++;
    Bitmap = RShiftU64 (Bitmap, 1);
  }
}

/**
  Put part (at most 64 pages at a time) of the guarded free pages back into
  the free page pool.

  Freed-memory guard is used to detect Use-After-Free (UAF) memory issues.
  It takes a 'use then throw away' approach: thrown-away memory is marked as
  not-present, so that any access to it (after free) will be caught by a
  page-fault exception.

  The problem is that this consumes lots of memory space. Once no memory is
  left in the pool to allocate, we have to restore part of the freed pages to
  their normal function. Otherwise the whole system will stop functioning.

  @param[out]   StartAddress    Start address of promoted memory.
  @param[out]   EndAddress      End address of promoted memory.

  @return TRUE  Succeeded in promoting memory.
  @return FALSE No free memory found.
**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS      *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS      *EndAddress
  )
{
  EFI_STATUS              Status;
  UINTN                   AvailablePages;
  UINT64                  Bitmap;
  EFI_PHYSICAL_ADDRESS    Start;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    return FALSE;
  }

  //
  // Similar to the memory allocation service, always search the freed pages
  // in descending direction.
  //
  Start          = mLastPromotedPage;
  AvailablePages = 0;
  while (AvailablePages == 0) {
    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    //
    // If the address wraps around, try the really freed pages at the top.
    //
    if (Start > mLastPromotedPage) {
      GetLastGuardedFreePageAddress (&Start);
      ASSERT (Start != 0);
      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
    while (Bitmap > 0) {
      if ((Bitmap & 1) != 0) {
        ++AvailablePages;
      } else if (AvailablePages == 0) {
        Start += EFI_PAGES_TO_SIZE (1);
      } else {
        break;
      }

      Bitmap = RShiftU64 (Bitmap, 1);
    }
  }

  if (AvailablePages != 0) {
    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
    ClearGuardedMemoryBits (Start, AvailablePages);

    if (gCpu != NULL) {
      //
      // Set flag to make sure allocating memory without GUARD for page table
      // operation; otherwise infinite loops could be caused.
      //
      mOnGuarding = TRUE;
      Status = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE (AvailablePages), 0);
      ASSERT_EFI_ERROR (Status);
      mOnGuarding = FALSE;
    }

    mLastPromotedPage = Start;
    *StartAddress     = Start;
    *EndAddress       = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
    return TRUE;
  }

  return FALSE;
}

/**
  Notify function used to set all Guard pages before CPU Arch Protocol
  installed.
**/
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  )
{
  ASSERT (gCpu != NULL);

  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&
      IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));
    CpuDeadLoop ();
  }

  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {
    SetAllGuardPages ();
  }

  if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    GuardAllFreedPages ();
  }
}

/**
  Helper function to convert a UINT64 value in binary to a string.

  @param[in]    Value       Value of a UINT64 integer.
  @param[out]   BinString   String buffer to contain the conversion result.
                            Must hold at least 65 characters (64 binary
                            digits plus the terminating null).

  @return VOID.
**/
VOID
Uint64ToBinString (
  IN  UINT64      Value,
  OUT CHAR8       *BinString
  )
{
  UINTN Index;

  if (BinString == NULL) {
    return;
  }

  for (Index = 64; Index > 0; --Index) {
    BinString[Index - 1] = '0' + (Value & 1);
    Value = RShiftU64 (Value, 1);
  }
  BinString[64] = '\0';
}

/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  INTN      Level;
  UINTN     RepeatZero;
  CHAR8     String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8     *Ruler1;
  CHAR8     *Ruler2;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {
    return;
  }

  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof (Indices), 0);
  SetMem (Tables, sizeof (Tables), 0);
  SetMem (Addresses, sizeof (Addresses), 0);

  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  RepeatZero    = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      Tables[Level] = 0;
      Level        -= 1;
      RepeatZero    = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));
    } else {
      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address    = Addresses[Level];

      if (TableEntry == 0) {
        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          if (RepeatZero == 0) {
            Uint64ToBinString (TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }
      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        Level           += 1;
        Tables[Level]    = TableEntry;
        Addresses[Level] = Address;
        Indices[Level]   = 0;
        RepeatZero       = 0;

        continue;
      } else {
        RepeatZero = 0;
        Uint64ToBinString (TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    Indices[Level]  += 1;
    Address          = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);
  }
}