]> git.proxmox.com Git - mirror_edk2.git/blame - MdeModulePkg/Core/Dxe/Mem/HeapGuard.c
MdeModulePkg/Core: add freed-memory guard feature
[mirror_edk2.git] / MdeModulePkg / Core / Dxe / Mem / HeapGuard.c
CommitLineData
e63da9f0
JW
1/** @file\r
2 UEFI Heap Guard functions.\r
3\r
8b13bca9 4Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>\r
e63da9f0
JW
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "DxeMain.h"\r
16#include "Imem.h"\r
17#include "HeapGuard.h"\r
18\r
19//\r
20// Global to avoid infinite reentrance of memory allocation when updating\r
21// page table attributes, which may need allocate pages for new PDE/PTE.\r
22//\r
23GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;\r
24\r
25//\r
26// Pointer to table tracking the Guarded memory with bitmap, in which '1'\r
27// is used to indicate memory guarded. '0' might be free memory or Guard\r
28// page itself, depending on status of memory adjacent to it.\r
29//\r
30GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;\r
31\r
32//\r
33// Current depth level of map table pointed by mGuardedMemoryMap.\r
34// mMapLevel must be initialized at least by 1. It will be automatically\r
35// updated according to the address of memory just tracked.\r
36//\r
37GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;\r
38\r
39//\r
40// Shift and mask for each level of map table\r
41//\r
42GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;\r
44GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
45 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;\r
46\r
63ebde8e
JW
47//\r
48// Used for promoting freed but not used pages.\r
49//\r
50GLOBAL_REMOVE_IF_UNREFERENCED EFI_PHYSICAL_ADDRESS mLastPromotedPage = BASE_4GB;\r
51\r
e63da9f0
JW
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  //
  // First bit within the 64-bit map entry covering Address, and the last bit
  // (modulo entry width) of the run to set.
  //
  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // Run reaches (or crosses) the end of the first entry: split into Msbs
    // bits in the first entry, Qwords whole 64-bit entries, and Lsbs trailing
    // bits in the last entry. The modulo keeps Msbs == 0 when StartBit == 0,
    // so the whole-entry path below handles that entry instead.
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Run fits entirely inside the first map entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
104\r
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  //
  // Mirror of SetBits(): split the run into a leading partial entry (Msbs),
  // whole 64-bit entries (Qwords) and a trailing partial entry (Lsbs).
  //
  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    //
    // The modulo keeps Msbs == 0 when StartBit == 0 so a full first entry is
    // cleared by the Qwords path instead.
    //
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Run fits entirely inside the first map entry.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
156\r
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address    Start address to retrieve bits for.
  @param[in]  BitNumber  Number of bits to get.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // If the run crosses into the next 64-bit entry, split it into Msbs bits
  // from the first entry and Lsbs bits from the second.
  //
  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  //
  // Reading a whole entry is special-cased: the mask computation below would
  // need LShiftU64(1, 64), which is not a valid shift.
  //
  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    Result = *BitMap;
  } else {
    Result    = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
    if (Lsbs > 0) {
      BitMap  += 1;
      Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
208\r
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  @param[in]   Address       Start address to search the bitmap for.
  @param[in]   AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out]  BitMap        Pointer to bitmap which covers the Address.
                             Set to NULL if the covering map unit does not
                             exist and AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;
  EFI_STATUS              Status;

  //
  // Adjust current map table depth according to the address to access.
  // Each extra level widens the address range the table can track; a new
  // root is inserted above the current one, with the old root stored in
  // entry 0 of the new table (address bits above the old range are 0).
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      //
      // Allocate via the internal API with NeedGuard == FALSE to avoid
      // re-entering the guard logic while building the guard map itself.
      //
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk down the table levels. Missing tables are allocated on demand when
  // AllocMapUnit is TRUE; otherwise report "no map" via a NULL bitmap.
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      Status = CoreInternalAllocatePages (
                 AllocateAnyPages,
                 EfiBootServicesData,
                 EFI_SIZE_TO_PAGES (Size),
                 &MapMemory,
                 FALSE
                 );
      ASSERT_EFI_ERROR (Status);
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    //
    // Index into the current level's table by the address bits this level
    // decodes, then descend to the selected entry.
    //
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index     &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap  = GuardMap;

  return BitsToUnitEnd;
}
304\r
305/**\r
306 Set corresponding bits in bitmap table to 1 according to given memory range.\r
307\r
308 @param[in] Address Memory address to guard from.\r
309 @param[in] NumberOfPages Number of pages to guard.\r
310\r
311 @return VOID.\r
312**/\r
313VOID\r
314EFIAPI\r
315SetGuardedMemoryBits (\r
316 IN EFI_PHYSICAL_ADDRESS Address,\r
317 IN UINTN NumberOfPages\r
318 )\r
319{\r
320 UINT64 *BitMap;\r
321 UINTN Bits;\r
322 UINTN BitsToUnitEnd;\r
323\r
324 while (NumberOfPages > 0) {\r
325 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
326 ASSERT (BitMap != NULL);\r
327\r
328 if (NumberOfPages > BitsToUnitEnd) {\r
329 // Cross map unit\r
330 Bits = BitsToUnitEnd;\r
331 } else {\r
332 Bits = NumberOfPages;\r
333 }\r
334\r
335 SetBits (Address, Bits, BitMap);\r
336\r
337 NumberOfPages -= Bits;\r
338 Address += EFI_PAGES_TO_SIZE (Bits);\r
339 }\r
340}\r
341\r
342/**\r
343 Clear corresponding bits in bitmap table according to given memory range.\r
344\r
345 @param[in] Address Memory address to unset from.\r
346 @param[in] NumberOfPages Number of pages to unset guard.\r
347\r
348 @return VOID.\r
349**/\r
350VOID\r
351EFIAPI\r
352ClearGuardedMemoryBits (\r
353 IN EFI_PHYSICAL_ADDRESS Address,\r
354 IN UINTN NumberOfPages\r
355 )\r
356{\r
357 UINT64 *BitMap;\r
358 UINTN Bits;\r
359 UINTN BitsToUnitEnd;\r
360\r
361 while (NumberOfPages > 0) {\r
362 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
363 ASSERT (BitMap != NULL);\r
364\r
365 if (NumberOfPages > BitsToUnitEnd) {\r
366 // Cross map unit\r
367 Bits = BitsToUnitEnd;\r
368 } else {\r
369 Bits = NumberOfPages;\r
370 }\r
371\r
372 ClearBits (Address, Bits, BitMap);\r
373\r
374 NumberOfPages -= Bits;\r
375 Address += EFI_PAGES_TO_SIZE (Bits);\r
376 }\r
377}\r
378\r
/**
  Retrieve corresponding bits in bitmap table according to given memory range.

  @param[in]  Address        Memory address to retrieve from.
  @param[in]  NumberOfPages  Number of pages to retrieve.

  @return An integer containing the guarded memory bitmap.
**/
UINT64
GetGuardedMemoryBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   NumberOfPages
  )
{
  UINT64          *BitMap;
  UINTN           Bits;
  UINT64          Result;
  UINTN           Shift;
  UINTN           BitsToUnitEnd;

  //
  // Result is a single 64-bit value, so no more than 64 pages per call.
  //
  ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);

  Result = 0;
  Shift  = 0;
  while (NumberOfPages > 0) {
    BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);

    if (NumberOfPages > BitsToUnitEnd) {
      // Cross map unit
      Bits = BitsToUnitEnd;
    } else {
      Bits = NumberOfPages;
    }

    //
    // A missing map unit (BitMap == NULL) means none of its pages is
    // guarded; the corresponding result bits stay 0.
    //
    if (BitMap != NULL) {
      Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);
    }

    Shift         += Bits;
    NumberOfPages -= Bits;
    Address       += EFI_PAGES_TO_SIZE (Bits);
  }

  return Result;
}
424\r
425/**\r
426 Get bit value in bitmap table for the given address.\r
427\r
428 @param[in] Address The address to retrieve for.\r
429\r
430 @return 1 or 0.\r
431**/\r
432UINTN\r
433EFIAPI\r
434GetGuardMapBit (\r
435 IN EFI_PHYSICAL_ADDRESS Address\r
436 )\r
437{\r
438 UINT64 *GuardMap;\r
439\r
440 FindGuardedMemoryMap (Address, FALSE, &GuardMap);\r
441 if (GuardMap != NULL) {\r
442 if (RShiftU64 (*GuardMap,\r
443 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {\r
444 return 1;\r
445 }\r
446 }\r
447\r
448 return 0;\r
449}\r
450\r
e63da9f0
JW
451\r
452/**\r
453 Check to see if the page at the given address is a Guard page or not.\r
454\r
455 @param[in] Address The address to check for.\r
456\r
457 @return TRUE The page at Address is a Guard page.\r
458 @return FALSE The page at Address is not a Guard page.\r
459**/\r
460BOOLEAN\r
461EFIAPI\r
462IsGuardPage (\r
463 IN EFI_PHYSICAL_ADDRESS Address\r
464 )\r
465{\r
466 UINTN BitMap;\r
467\r
468 //\r
469 // There must be at least one guarded page before and/or after given\r
470 // address if it's a Guard page. The bitmap pattern should be one of\r
471 // 001, 100 and 101\r
472 //\r
473 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);\r
474 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
475}\r
476\r
e63da9f0
JW
477\r
478/**\r
479 Check to see if the page at the given address is guarded or not.\r
480\r
481 @param[in] Address The address to check for.\r
482\r
483 @return TRUE The page at Address is guarded.\r
484 @return FALSE The page at Address is not guarded.\r
485**/\r
486BOOLEAN\r
487EFIAPI\r
488IsMemoryGuarded (\r
489 IN EFI_PHYSICAL_ADDRESS Address\r
490 )\r
491{\r
492 return (GetGuardMapBit (Address) == 1);\r
493}\r
494\r
/**
  Set the page at the given address to be a Guard page.

  This is done by changing the page table attribute to be NOT PRESENT.

  @param[in]  BaseAddress  Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
SetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  EFI_STATUS      Status;

  //
  // Page attributes cannot be changed before the CPU Arch Protocol is
  // available; presumably pending Guard pages are applied later once it is
  // installed (see SetAllGuardPages) — confirm against the driver init path.
  //
  if (gCpu == NULL) {
    return;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as NX memory protection.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}
529\r
/**
  Unset the Guard page at the given address to the normal memory.

  This is done by changing the page table attribute to be PRESENT.

  @param[in]  BaseAddress  Page address to Guard at.

  @return VOID.
**/
VOID
EFIAPI
UnsetGuardPage (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress
  )
{
  UINT64          Attributes;
  EFI_STATUS      Status;

  //
  // Without the CPU Arch Protocol the attribute cannot be changed here.
  //
  if (gCpu == NULL) {
    return;
  }

  //
  // Once the Guard page is unset, it will be freed back to memory pool. NX
  // memory protection must be restored for this page if NX is enabled for
  // free memory.
  //
  Attributes = 0;
  if ((PcdGet64 (PcdDxeNxMemoryProtectionPolicy) & (1 << EfiConventionalMemory)) != 0) {
    Attributes |= EFI_MEMORY_XP;
  }

  //
  // Set flag to make sure allocating memory without GUARD for page table
  // operation; otherwise infinite loops could be caused.
  //
  mOnGuarding = TRUE;
  //
  // Note: This might overwrite other attributes needed by other features,
  // such as memory protection (NX). Please make sure they are not enabled
  // at the same time.
  //
  Status = gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, Attributes);
  ASSERT_EFI_ERROR (Status);
  mOnGuarding = FALSE;
}
576\r
577/**\r
578 Check to see if the memory at the given address should be guarded or not.\r
579\r
580 @param[in] MemoryType Memory type to check.\r
581 @param[in] AllocateType Allocation type to check.\r
582 @param[in] PageOrPool Indicate a page allocation or pool allocation.\r
583\r
584\r
585 @return TRUE The given type of memory should be guarded.\r
586 @return FALSE The given type of memory should not be guarded.\r
587**/\r
588BOOLEAN\r
589IsMemoryTypeToGuard (\r
590 IN EFI_MEMORY_TYPE MemoryType,\r
591 IN EFI_ALLOCATE_TYPE AllocateType,\r
592 IN UINT8 PageOrPool\r
593 )\r
594{\r
595 UINT64 TestBit;\r
596 UINT64 ConfigBit;\r
e63da9f0 597\r
7fef06af 598 if (AllocateType == AllocateAddress) {\r
e63da9f0
JW
599 return FALSE;\r
600 }\r
601\r
e63da9f0
JW
602 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {\r
603 return FALSE;\r
604 }\r
605\r
606 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {\r
607 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);\r
608 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {\r
609 ConfigBit = PcdGet64 (PcdHeapGuardPageType);\r
610 } else {\r
611 ConfigBit = (UINT64)-1;\r
612 }\r
613\r
614 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {\r
615 TestBit = BIT63;\r
616 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {\r
617 TestBit = BIT62;\r
618 } else if (MemoryType < EfiMaxMemoryType) {\r
619 TestBit = LShiftU64 (1, MemoryType);\r
620 } else if (MemoryType == EfiMaxMemoryType) {\r
621 TestBit = (UINT64)-1;\r
622 } else {\r
623 TestBit = 0;\r
624 }\r
625\r
626 return ((ConfigBit & TestBit) != 0);\r
627}\r
628\r
629/**\r
630 Check to see if the pool at the given address should be guarded or not.\r
631\r
632 @param[in] MemoryType Pool type to check.\r
633\r
634\r
635 @return TRUE The given type of pool should be guarded.\r
636 @return FALSE The given type of pool should not be guarded.\r
637**/\r
638BOOLEAN\r
639IsPoolTypeToGuard (\r
640 IN EFI_MEMORY_TYPE MemoryType\r
641 )\r
642{\r
643 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,\r
644 GUARD_HEAP_TYPE_POOL);\r
645}\r
646\r
647/**\r
648 Check to see if the page at the given address should be guarded or not.\r
649\r
650 @param[in] MemoryType Page type to check.\r
651 @param[in] AllocateType Allocation type to check.\r
652\r
653 @return TRUE The given type of page should be guarded.\r
654 @return FALSE The given type of page should not be guarded.\r
655**/\r
656BOOLEAN\r
657IsPageTypeToGuard (\r
658 IN EFI_MEMORY_TYPE MemoryType,\r
659 IN EFI_ALLOCATE_TYPE AllocateType\r
660 )\r
661{\r
662 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);\r
663}\r
664\r
a6a0a597
JW
665/**\r
666 Check to see if the heap guard is enabled for page and/or pool allocation.\r
667\r
63ebde8e
JW
668 @param[in] GuardType Specify the sub-type(s) of Heap Guard.\r
669\r
a6a0a597
JW
670 @return TRUE/FALSE.\r
671**/\r
672BOOLEAN\r
673IsHeapGuardEnabled (\r
63ebde8e 674 UINT8 GuardType\r
a6a0a597
JW
675 )\r
676{\r
63ebde8e 677 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages, GuardType);\r
a6a0a597
JW
678}\r
679\r
e63da9f0
JW
680/**\r
681 Set head Guard and tail Guard for the given memory range.\r
682\r
683 @param[in] Memory Base address of memory to set guard for.\r
684 @param[in] NumberOfPages Memory size in pages.\r
685\r
686 @return VOID\r
687**/\r
688VOID\r
689SetGuardForMemory (\r
690 IN EFI_PHYSICAL_ADDRESS Memory,\r
691 IN UINTN NumberOfPages\r
692 )\r
693{\r
694 EFI_PHYSICAL_ADDRESS GuardPage;\r
695\r
696 //\r
697 // Set tail Guard\r
698 //\r
699 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
700 if (!IsGuardPage (GuardPage)) {\r
701 SetGuardPage (GuardPage);\r
702 }\r
703\r
704 // Set head Guard\r
705 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
706 if (!IsGuardPage (GuardPage)) {\r
707 SetGuardPage (GuardPage);\r
708 }\r
709\r
710 //\r
711 // Mark the memory range as Guarded\r
712 //\r
713 SetGuardedMemoryBits (Memory, NumberOfPages);\r
714}\r
715\r
/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory         Base address of memory to unset guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage   = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage   = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
808\r
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE(SizeRequested, 8);
  }

  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    //
    // A candidate starting at address 0 is never handed out.
    //
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // No enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
869\r
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory         Base address of memory to free.
  @param[in,out]  NumberOfPages  Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start       = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //   Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //   Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                 1     X -> Don't free first page  (need a new Guard)
  //                            (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's
    // a partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap  = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's
    // a partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
971\r
972/**\r
973 Adjust the base and number of pages to really allocate according to Guard.\r
974\r
975 @param[in,out] Memory Base address of free memory.\r
976 @param[in,out] NumberOfPages Size of memory to allocate.\r
977\r
978 @return VOID.\r
979**/\r
980VOID\r
981AdjustMemoryA (\r
982 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
983 IN OUT UINTN *NumberOfPages\r
984 )\r
985{\r
986 //\r
987 // FindFreePages() has already taken the Guard into account. It's safe to\r
988 // adjust the start address and/or number of pages here, to make sure that\r
989 // the Guards are also "allocated".\r
990 //\r
991 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {\r
992 // No tail Guard, add one.\r
993 *NumberOfPages += 1;\r
994 }\r
995\r
996 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {\r
997 // No head Guard, add one.\r
998 *Memory -= EFI_PAGE_SIZE;\r
999 *NumberOfPages += 1;\r
1000 }\r
1001}\r
1002\r
1003/**\r
1004 Adjust the pool head position to make sure the Guard page is adjavent to\r
1005 pool tail or pool head.\r
1006\r
1007 @param[in] Memory Base address of memory allocated.\r
1008 @param[in] NoPages Number of pages actually allocated.\r
1009 @param[in] Size Size of memory requested.\r
1010 (plus pool head/tail overhead)\r
1011\r
1012 @return Address of pool head.\r
1013**/\r
1014VOID *\r
1015AdjustPoolHeadA (\r
1016 IN EFI_PHYSICAL_ADDRESS Memory,\r
1017 IN UINTN NoPages,\r
1018 IN UINTN Size\r
1019 )\r
1020{\r
c44218e5 1021 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1022 //\r
1023 // Pool head is put near the head Guard\r
1024 //\r
1025 return (VOID *)(UINTN)Memory;\r
1026 }\r
1027\r
1028 //\r
1029 // Pool head is put near the tail Guard\r
1030 //\r
c44218e5 1031 Size = ALIGN_VALUE (Size, 8);\r
e63da9f0
JW
1032 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);\r
1033}\r
1034\r
1035/**\r
1036 Get the page base address according to pool head address.\r
1037\r
1038 @param[in] Memory Head address of pool to free.\r
1039\r
1040 @return Address of pool head.\r
1041**/\r
1042VOID *\r
1043AdjustPoolHeadF (\r
1044 IN EFI_PHYSICAL_ADDRESS Memory\r
1045 )\r
1046{\r
c44218e5 1047 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1048 //\r
1049 // Pool head is put near the head Guard\r
1050 //\r
1051 return (VOID *)(UINTN)Memory;\r
1052 }\r
1053\r
1054 //\r
1055 // Pool head is put near the tail Guard\r
1056 //\r
1057 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);\r
1058}\r
1059\r
/**
  Allocate or free guarded memory.

  @param[in]  Start          Start address of memory to allocate or free.
  @param[in]  NumberOfPages  Memory size in pages.
  @param[in]  NewType        Memory type to convert to.

  @retval EFI_SUCCESS  The whole range was consumed by Guard pages and no
                       conversion was necessary.
  @return The status returned by CoreConvertPages() otherwise.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  )
{
  UINT64  OldStart;
  UINTN   OldPages;

  if (NewType == EfiConventionalMemory) {
    //
    // Freeing: remember the original range, then shrink/extend it according
    // to shared Guard pages.
    //
    OldStart = Start;
    OldPages = NumberOfPages;

    AdjustMemoryF (&Start, &NumberOfPages);
    //
    // It's safe to unset Guard page inside memory lock because there should
    // be no memory allocation occurred in updating memory page attribute at
    // this point. And unsetting Guard page before free will prevent Guard
    // page just freed back to pool from being allocated right away before
    // marking it usable (from non-present to present).
    //
    UnsetGuardForMemory (OldStart, OldPages);
    if (NumberOfPages == 0) {
      //
      // Nothing left to convert once Guard pages are accounted for.
      //
      return EFI_SUCCESS;
    }
  } else {
    //
    // Allocating: widen the range so Guard pages get allocated too.
    //
    AdjustMemoryA (&Start, &NumberOfPages);
  }

  return CoreConvertPages (Start, NumberOfPages, NewType);
}
1101\r
7fef06af
JW
/**
  Set all Guard pages which cannot be set before CPU Arch Protocol installed.
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     Index;
  BOOLEAN   OnGuarding;

  //
  // Nothing to do if the guarded-memory bitmap was never populated or its
  // recorded depth is out of the legal range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Entries[] holds each level's index mask (max index), which is why the
  // exhaustion test below uses '>' rather than '>='.
  //
  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // Non-recursive walk of the multi-level bitmap, starting at the shallowest
  // level actually in use (GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel).
  //
  Level         = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address       = 0;
  OnGuarding    = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back to the parent level.
      //
      Tables[Level] = 0;
      Level        -= 1;
    } else {

      TableEntry  = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address     = Addresses[Level];

      if (TableEntry == 0) {
        //
        // All-zero entry: no guarded page anywhere in this range, so any
        // in-progress run of guarded pages ends here.
        //
        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Intermediate level: descend into the child table.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;

      } else {
        //
        // Leaf level: scan the 64 page bits. A 0->1 transition marks a head
        // Guard page (one page below the first guarded page); a 1->0
        // transition marks a tail Guard page (the page at Address itself).
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          if (TableEntry == 0) {
            //
            // Remaining bits are all zero; no further transitions possible
            // in this entry.
            //
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address += EFI_PAGE_SIZE;
          Index += 1;
        }
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      //
      // Popped above the starting level: the whole map has been walked.
      //
      break;
    }

    //
    // Advance to the next entry at this level and recompute the base address
    // it covers from the parent's base plus this level's shift.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1211\r
63ebde8e
JW
/**
  Find the address of top-most guarded free page.

  @param[out]  Address    Start address of top-most guarded free page.

  @return VOID.
**/
VOID
GetLastGuardedFreePageAddress (
  OUT EFI_PHYSICAL_ADDRESS      *Address
  )
{
  EFI_PHYSICAL_ADDRESS    AddressGranularity;
  EFI_PHYSICAL_ADDRESS    BaseAddress;
  UINTN                   Level;
  UINT64                  Map;
  INTN                    Index;

  ASSERT (mMapLevel >= 1);

  //
  // Walk down the bitmap table levels, at each level following the non-NULL
  // entry with the largest index and accumulating the base address covered
  // by that entry.
  //
  BaseAddress = 0;
  Map = mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {
    //
    // Size of the address range covered by one entry at this level.
    //
    AddressGranularity = LShiftU64 (1, mLevelShift[Level]);

    //
    // Find the non-NULL entry at largest index.
    //
    for (Index = (INTN)mLevelMask[Level]; Index >= 0 ; --Index) {
      if (((UINT64 *)(UINTN)Map)[Index] != 0) {
        BaseAddress += MultU64x32 (AddressGranularity, (UINT32)Index);
        Map = ((UINT64 *)(UINTN)Map)[Index];
        break;
      }
    }
  }

  //
  // Find the non-zero MSB then get the page address. Each shift accounts for
  // one page, so BaseAddress ends up one page past the most significant set
  // bit, i.e. just above the top-most guarded free page.
  //
  while (Map != 0) {
    Map = RShiftU64 (Map, 1);
    BaseAddress += EFI_PAGES_TO_SIZE (1);
  }

  *Address = BaseAddress;
}
1261\r
/**
  Record freed pages.

  @param[in]  BaseAddress   Base address of just freed pages.
  @param[in]  Pages         Number of freed pages.

  @return VOID.
**/
VOID
MarkFreedPages (
  IN EFI_PHYSICAL_ADDRESS     BaseAddress,
  IN UINTN                    Pages
  )
{
  //
  // The freed-memory guard reuses the guarded-memory bitmap: a set bit here
  // marks a page as freed-and-tracked so it can later be made not-present.
  //
  SetGuardedMemoryBits (BaseAddress, Pages);
}
1278\r
1279/**\r
1280 Record freed pages as well as mark them as not-present.\r
1281\r
1282 @param[in] BaseAddress Base address of just freed pages.\r
1283 @param[in] Pages Number of freed pages.\r
1284\r
1285 @return VOID.\r
1286**/\r
1287VOID\r
1288EFIAPI\r
1289GuardFreedPages (\r
1290 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
1291 IN UINTN Pages\r
1292 )\r
1293{\r
1294 EFI_STATUS Status;\r
1295\r
1296 //\r
1297 // Legacy memory lower than 1MB might be accessed with no allocation. Leave\r
1298 // them alone.\r
1299 //\r
1300 if (BaseAddress < BASE_1MB) {\r
1301 return;\r
1302 }\r
1303\r
1304 MarkFreedPages (BaseAddress, Pages);\r
1305 if (gCpu != NULL) {\r
1306 //\r
1307 // Set flag to make sure allocating memory without GUARD for page table\r
1308 // operation; otherwise infinite loops could be caused.\r
1309 //\r
1310 mOnGuarding = TRUE;\r
1311 //\r
1312 // Note: This might overwrite other attributes needed by other features,\r
1313 // such as NX memory protection.\r
1314 //\r
1315 Status = gCpu->SetMemoryAttributes (\r
1316 gCpu,\r
1317 BaseAddress,\r
1318 EFI_PAGES_TO_SIZE (Pages),\r
1319 EFI_MEMORY_RP\r
1320 );\r
1321 //\r
1322 // Normally we should ASSERT the returned Status. But there might be memory\r
1323 // alloc/free involved in SetMemoryAttributes(), which might fail this\r
1324 // calling. It's rare case so it's OK to let a few tiny holes be not-guarded.\r
1325 //\r
1326 if (EFI_ERROR (Status)) {\r
1327 DEBUG ((DEBUG_WARN, "Failed to guard freed pages: %p (%lu)\n", BaseAddress, (UINT64)Pages));\r
1328 }\r
1329 mOnGuarding = FALSE;\r
1330 }\r
1331}\r
1332\r
1333/**\r
1334 Record freed pages as well as mark them as not-present, if enabled.\r
1335\r
1336 @param[in] BaseAddress Base address of just freed pages.\r
1337 @param[in] Pages Number of freed pages.\r
1338\r
1339 @return VOID.\r
1340**/\r
1341VOID\r
1342EFIAPI\r
1343GuardFreedPagesChecked (\r
1344 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
1345 IN UINTN Pages\r
1346 )\r
1347{\r
1348 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
1349 GuardFreedPages (BaseAddress, Pages);\r
1350 }\r
1351}\r
1352\r
/**
  Mark all pages freed before CPU Arch Protocol as not-present.

**/
VOID
GuardAllFreedPages (
  VOID
  )
{
  UINTN     Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINTN     Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];
  UINT64    TableEntry;
  UINT64    Address;
  UINT64    GuardPage;
  INTN      Level;
  UINTN     BitIndex;
  UINTN     GuardPageNumber;

  //
  // Nothing to do if the guarded-memory bitmap was never populated or its
  // recorded depth is out of the legal range.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Entries[] holds each level's index mask (max index), hence the '>' test
  // used for table exhaustion below.
  //
  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // Non-recursive walk of the multi-level bitmap. GuardPage/GuardPageNumber
  // accumulate the current run of set (freed) pages; GuardPage == -1 means
  // no run is in progress.
  //
  Level           = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level]   = mGuardedMemoryMap;
  Address         = 0;
  GuardPage       = (UINT64)-1;
  GuardPageNumber = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back to the parent level.
      //
      Tables[Level] = 0;
      Level        -= 1;
    } else {
      TableEntry  = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address     = Addresses[Level];

      if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
        //
        // Intermediate level: descend into the child table.
        //
        Level            += 1;
        Tables[Level]     = TableEntry;
        Addresses[Level]  = Address;
        Indices[Level]    = 0;

        continue;
      } else {
        //
        // Leaf level: scan the 64 page bits with a moving mask. Set bits
        // extend the current run of freed pages; the first clear bit after
        // a run flushes it via GuardFreedPages().
        //
        // NOTE(review): a run still pending when the whole walk finishes is
        // not flushed here; this relies on the tracked space ending with a
        // clear bit — confirm against bitmap population.
        //
        BitIndex = 1;
        while (BitIndex != 0) {
          if ((TableEntry & BitIndex) != 0) {
            if (GuardPage == (UINT64)-1) {
              GuardPage = Address;
            }
            ++GuardPageNumber;
          } else if (GuardPageNumber > 0) {
            GuardFreedPages (GuardPage, GuardPageNumber);
            GuardPageNumber = 0;
            GuardPage       = (UINT64)-1;
          }

          if (TableEntry == 0) {
            //
            // All-zero entry: the clear-bit branch above has already flushed
            // any pending run; skip the remaining 63 iterations.
            //
            break;
          }

          Address += EFI_PAGES_TO_SIZE (1);
          BitIndex = LShiftU64 (BitIndex, 1);
        }
      }
    }

    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      //
      // Popped above the starting level: the whole map has been walked.
      //
      break;
    }

    //
    // Advance to the next entry at this level and recompute its base address.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64 (Indices[Level], Shifts[Level]);

  }

  //
  // Update the maximum address of freed page which can be used for memory
  // promotion upon out-of-memory-space.
  //
  GetLastGuardedFreePageAddress (&Address);
  if (Address != 0) {
    mLastPromotedPage = Address;
  }
}
1451\r
/**
  This function checks to see if the given memory map descriptor in a memory map
  can be merged with any guarded free pages.

  @param  MemoryMapEntry    A pointer to a descriptor in MemoryMap.
  @param  MaxAddress        Maximum address to stop the merge.

  @return VOID

**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR      *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS       MaxAddress
  )
{
  EFI_PHYSICAL_ADDRESS        EndAddress;
  UINT64                      Bitmap;
  INTN                        Pages;

  //
  // Only merge when the freed-memory guard is enabled and the descriptor
  // describes memory (types below EfiMemoryMappedIO).
  //
  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED) ||
      MemoryMapEntry->Type >= EfiMemoryMappedIO) {
    return;
  }

  //
  // Pages = number of pages between the current end of this descriptor and
  // MaxAddress, i.e. the candidates for merging.
  //
  Bitmap = 0;
  Pages  = EFI_SIZE_TO_PAGES (MaxAddress - MemoryMapEntry->PhysicalStart);
  Pages -= MemoryMapEntry->NumberOfPages;
  while (Pages > 0) {
    if (Bitmap == 0) {
      //
      // Fetch the next 64 tracking bits starting at the descriptor's current
      // end whenever the previous batch has been consumed.
      //
      EndAddress = MemoryMapEntry->PhysicalStart +
                   EFI_PAGES_TO_SIZE (MemoryMapEntry->NumberOfPages);
      Bitmap = GetGuardedMemoryBits (EndAddress, GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    if ((Bitmap & 1) == 0) {
      //
      // First page that is not a guarded free page: stop merging.
      //
      break;
    }

    //
    // Absorb one guarded free page into this descriptor.
    //
    Pages--;
    MemoryMapEntry->NumberOfPages++;
    Bitmap = RShiftU64 (Bitmap, 1);
  }
}
1496\r
/**
  Put part (at most 64 pages a time) guarded free pages back to free page pool.

  Freed memory guard is used to detect Use-After-Free (UAF) memory issue, which
  makes use of 'Used then throw away' way to detect any illegal access to freed
  memory. The thrown-away memory will be marked as not-present so that any access
  to those memory (after free) will be caught by page-fault exception.

  The problem is that this will consume lots of memory space. Once no memory
  left in pool to allocate, we have to restore part of the freed pages to their
  normal function. Otherwise the whole system will stop functioning.

  @param  StartAddress    Start address of promoted memory.
  @param  EndAddress      End address of promoted memory.

  @return TRUE    Succeeded to promote memory.
  @return FALSE   No free memory found.

**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS      *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS      *EndAddress
  )
{
  EFI_STATUS              Status;
  UINTN                   AvailablePages;
  UINT64                  Bitmap;
  EFI_PHYSICAL_ADDRESS    Start;

  if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
    return FALSE;
  }

  //
  // Similar to memory allocation service, always search the freed pages in
  // descending direction.
  //
  // NOTE(review): the outer loop assumes at least one freed page is tracked
  // somewhere; with an empty bitmap it would not terminate — confirm callers
  // only invoke this after pages have been freed.
  //
  Start          = mLastPromotedPage;
  AvailablePages = 0;
  while (AvailablePages == 0) {
    Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    //
    // If the address wraps around, try the really freed pages at top.
    //
    if (Start > mLastPromotedPage) {
      GetLastGuardedFreePageAddress (&Start);
      ASSERT (Start != 0);
      Start -= EFI_PAGES_TO_SIZE (GUARDED_HEAP_MAP_ENTRY_BITS);
    }

    //
    // Examine a 64-page window: skip leading clear bits, then count the
    // first contiguous run of set (freed) bits starting at Start.
    //
    Bitmap = GetGuardedMemoryBits (Start, GUARDED_HEAP_MAP_ENTRY_BITS);
    while (Bitmap > 0) {
      if ((Bitmap & 1) != 0) {
        ++AvailablePages;
      } else if (AvailablePages == 0) {
        Start += EFI_PAGES_TO_SIZE (1);
      } else {
        break;
      }

      Bitmap = RShiftU64 (Bitmap, 1);
    }
  }

  if (AvailablePages) {
    DEBUG ((DEBUG_INFO, "Promoted pages: %lX (%lx)\r\n", Start, (UINT64)AvailablePages));
    //
    // Stop tracking the promoted pages as freed so they can be reused.
    //
    ClearGuardedMemoryBits (Start, AvailablePages);

    if (gCpu != NULL) {
      //
      // Set flag to make sure allocating memory without GUARD for page table
      // operation; otherwise infinite loops could be caused.
      //
      mOnGuarding = TRUE;
      Status = gCpu->SetMemoryAttributes (gCpu, Start, EFI_PAGES_TO_SIZE(AvailablePages), 0);
      ASSERT_EFI_ERROR (Status);
      mOnGuarding = FALSE;
    }

    mLastPromotedPage = Start;
    *StartAddress     = Start;
    *EndAddress       = Start + EFI_PAGES_TO_SIZE (AvailablePages) - 1;
    return TRUE;
  }

  return FALSE;
}
1585\r
7fef06af
JW
1586/**\r
1587 Notify function used to set all Guard pages before CPU Arch Protocol installed.\r
1588**/\r
1589VOID\r
1590HeapGuardCpuArchProtocolNotify (\r
1591 VOID\r
1592 )\r
1593{\r
1594 ASSERT (gCpu != NULL);\r
63ebde8e
JW
1595\r
1596 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL) &&\r
1597 IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
1598 DEBUG ((DEBUG_ERROR, "Heap guard and freed memory guard cannot be enabled at the same time.\n"));\r
1599 CpuDeadLoop ();\r
1600 }\r
1601\r
1602 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL)) {\r
1603 SetAllGuardPages ();\r
1604 }\r
1605\r
1606 if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {\r
1607 GuardAllFreedPages ();\r
1608 }\r
7fef06af
JW
1609}\r
1610\r
e63da9f0
JW
1611/**\r
1612 Helper function to convert a UINT64 value in binary to a string.\r
1613\r
1614 @param[in] Value Value of a UINT64 integer.\r
1615 @param[out] BinString String buffer to contain the conversion result.\r
1616\r
1617 @return VOID.\r
1618**/\r
1619VOID\r
1620Uint64ToBinString (\r
1621 IN UINT64 Value,\r
1622 OUT CHAR8 *BinString\r
1623 )\r
1624{\r
1625 UINTN Index;\r
1626\r
1627 if (BinString == NULL) {\r
1628 return;\r
1629 }\r
1630\r
1631 for (Index = 64; Index > 0; --Index) {\r
1632 BinString[Index - 1] = '0' + (Value & 1);\r
1633 Value = RShiftU64 (Value, 1);\r
1634 }\r
1635 BinString[64] = '\0';\r
1636}\r
1637\r
1638/**\r
1639 Dump the guarded memory bit map.\r
1640**/\r
1641VOID\r
1642EFIAPI\r
1643DumpGuardedMemoryBitmap (\r
1644 VOID\r
1645 )\r
1646{\r
1647 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1648 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1649 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1650 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1651 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1652 UINT64 TableEntry;\r
1653 UINT64 Address;\r
1654 INTN Level;\r
1655 UINTN RepeatZero;\r
1656 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];\r
1657 CHAR8 *Ruler1;\r
1658 CHAR8 *Ruler2;\r
1659\r
63ebde8e
JW
1660 if (!IsHeapGuardEnabled (GUARD_HEAP_TYPE_ALL)) {\r
1661 return;\r
1662 }\r
1663\r
c6c50165
JW
1664 if (mGuardedMemoryMap == 0 ||\r
1665 mMapLevel == 0 ||\r
1666 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {\r
e63da9f0
JW
1667 return;\r
1668 }\r
1669\r
1670 Ruler1 = " 3 2 1 0";\r
1671 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";\r
1672\r
1673 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="\r
1674 " Guarded Memory Bitmap "\r
1675 "==============================\r\n"));\r
1676 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));\r
1677 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));\r
1678\r
1679 CopyMem (Entries, mLevelMask, sizeof (Entries));\r
1680 CopyMem (Shifts, mLevelShift, sizeof (Shifts));\r
1681\r
1682 SetMem (Indices, sizeof(Indices), 0);\r
1683 SetMem (Tables, sizeof(Tables), 0);\r
1684 SetMem (Addresses, sizeof(Addresses), 0);\r
1685\r
1686 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
1687 Tables[Level] = mGuardedMemoryMap;\r
1688 Address = 0;\r
1689 RepeatZero = 0;\r
1690\r
1691 while (TRUE) {\r
1692 if (Indices[Level] > Entries[Level]) {\r
1693\r
1694 Tables[Level] = 0;\r
1695 Level -= 1;\r
1696 RepeatZero = 0;\r
1697\r
1698 DEBUG ((\r
1699 HEAP_GUARD_DEBUG_LEVEL,\r
1700 "========================================="\r
1701 "=========================================\r\n"\r
1702 ));\r
1703\r
1704 } else {\r
1705\r
1706 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];\r
1707 Address = Addresses[Level];\r
1708\r
1709 if (TableEntry == 0) {\r
1710\r
1711 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1712 if (RepeatZero == 0) {\r
1713 Uint64ToBinString(TableEntry, String);\r
1714 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));\r
1715 } else if (RepeatZero == 1) {\r
1716 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));\r
1717 }\r
1718 RepeatZero += 1;\r
1719 }\r
1720\r
1721 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1722\r
1723 Level += 1;\r
1724 Tables[Level] = TableEntry;\r
1725 Addresses[Level] = Address;\r
1726 Indices[Level] = 0;\r
1727 RepeatZero = 0;\r
1728\r
1729 continue;\r
1730\r
1731 } else {\r
1732\r
1733 RepeatZero = 0;\r
1734 Uint64ToBinString(TableEntry, String);\r
1735 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));\r
1736\r
1737 }\r
1738 }\r
1739\r
1740 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {\r
1741 break;\r
1742 }\r
1743\r
1744 Indices[Level] += 1;\r
1745 Address = (Level == 0) ? 0 : Addresses[Level - 1];\r
1746 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);\r
1747\r
1748 }\r
1749}\r
1750\r