MdeModulePkg/PiSmmCore: fix bits operation error on a boundary condition
[mirror_edk2.git] / MdeModulePkg / Core / PiSmmCore / HeapGuard.c
CommitLineData
e63da9f0
JW
1/** @file\r
2 UEFI Heap Guard functions.\r
3\r
8b13bca9 4Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>\r
e63da9f0
JW
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "HeapGuard.h"\r
16\r
17//\r
18// Global to avoid infinite reentrance of memory allocation when updating\r
19// page table attributes, which may need allocating pages for new PDE/PTE.\r
20//\r
21GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;\r
22\r
23//\r
24// Pointer to table tracking the Guarded memory with bitmap, in which '1'\r
25// is used to indicate memory guarded. '0' might be free memory or Guard\r
26// page itself, depending on status of memory adjacent to it.\r
27//\r
28GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;\r
29\r
30//\r
31// Current depth level of map table pointed by mGuardedMemoryMap.\r
32// mMapLevel must be initialized at least by 1. It will be automatically\r
33// updated according to the address of memory just tracked.\r
34//\r
35GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;\r
36\r
37//\r
38// Shift and mask for each level of map table\r
39//\r
40GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
41 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;\r
42GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;\r
44\r
45//\r
46// SMM memory attribute protocol\r
47//\r
48EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *mSmmMemoryAttribute = NULL;\r
49\r
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  The bits to set may span up to three regions: the tail of the first
  map word (Msbs), a run of whole words (Qwords), and the head of the
  last word (Lsbs).

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // ">=" (not ">") so that a range which exactly reaches the end of the
  // current map word is also split; the "%" keeps each partial-word bit
  // count in 0..(GUARDED_HEAP_MAP_ENTRY_BITS - 1), i.e. below the word
  // width, so the shifts below stay in range.
  //
  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Range fits entirely within the first map word.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  //
  // Partial bits in the first word.
  //
  if (Msbs > 0) {
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  //
  // Whole words are filled in one shot.
  //
  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  //
  // Partial bits in the last word.
  //
  if (Lsbs > 0) {
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
102\r
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  Mirror of SetBits(): the range is split into the tail of the first map
  word (Msbs), whole words (Qwords), and the head of the last word (Lsbs).

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // ">=" (not ">") so that a range which exactly reaches the end of the
  // current map word is also split; the "%" keeps each partial-word bit
  // count below the word width so the shifts below stay in range.
  //
  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Range fits entirely within the first map word.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  //
  // Partial bits in the first word.
  //
  if (Msbs > 0) {
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap += 1;
  }

  //
  // Whole words are zeroed in one shot.
  //
  if (Qwords > 0) {
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  //
  // Partial bits in the last word.
  //
  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
154\r
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address    Start address to retrieve bits for.
  @param[in]  BitNumber  Number of bits to get.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // Split the request into Msbs bits read from the first map word and
  // Lsbs bits read from the following word (when the range crosses a
  // word boundary).
  //
  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  //
  // Special case: reading a full aligned word. Handled separately because
  // the mask expression below would need a shift by the full word width.
  //
  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    Result = *BitMap;
  } else {
    Result    = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
    if (Lsbs > 0) {
      BitMap  += 1;
      Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
206\r
207/**\r
208 Helper function to allocate pages without Guard for internal uses.\r
209\r
210 @param[in] Pages Page number.\r
211\r
212 @return Address of memory allocated.\r
213**/\r
214VOID *\r
215PageAlloc (\r
216 IN UINTN Pages\r
217 )\r
218{\r
219 EFI_STATUS Status;\r
220 EFI_PHYSICAL_ADDRESS Memory;\r
221\r
222 Status = SmmInternalAllocatePages (AllocateAnyPages, EfiRuntimeServicesData,\r
223 Pages, &Memory, FALSE);\r
224 if (EFI_ERROR (Status)) {\r
225 Memory = 0;\r
226 }\r
227\r
228 return (VOID *)(UINTN)Memory;\r
229}\r
230\r
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  @param[in]  Address       Start address to search the bitmap for.
  @param[in]  AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out] BitMap        Pointer to bitmap which covers the Address. Set
                            to NULL if the covering table is absent and
                            AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN BOOLEAN                 AllocMapUnit,
  OUT UINT64                 **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;

  //
  // Adjust current map table depth according to the address to access.
  // Each pass grows the tree by one level, making the old root a child
  // of a new, zero-filled root table.
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      //
      // Old root becomes entry 0 of the new root (low addresses).
      //
      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk down the table levels, allocating missing intermediate tables on
  // demand when AllocMapUnit is TRUE.
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    //
    // Index into this level's table by the address bits this level covers.
    //
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index     &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap  = GuardMap;

  return BitsToUnitEnd;
}
311\r
312/**\r
313 Set corresponding bits in bitmap table to 1 according to given memory range.\r
314\r
315 @param[in] Address Memory address to guard from.\r
316 @param[in] NumberOfPages Number of pages to guard.\r
317\r
318 @return VOID\r
319**/\r
320VOID\r
321EFIAPI\r
322SetGuardedMemoryBits (\r
323 IN EFI_PHYSICAL_ADDRESS Address,\r
324 IN UINTN NumberOfPages\r
325 )\r
326{\r
327 UINT64 *BitMap;\r
328 UINTN Bits;\r
329 UINTN BitsToUnitEnd;\r
330\r
331 while (NumberOfPages > 0) {\r
332 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
333 ASSERT (BitMap != NULL);\r
334\r
335 if (NumberOfPages > BitsToUnitEnd) {\r
336 // Cross map unit\r
337 Bits = BitsToUnitEnd;\r
338 } else {\r
339 Bits = NumberOfPages;\r
340 }\r
341\r
342 SetBits (Address, Bits, BitMap);\r
343\r
344 NumberOfPages -= Bits;\r
345 Address += EFI_PAGES_TO_SIZE (Bits);\r
346 }\r
347}\r
348\r
349/**\r
350 Clear corresponding bits in bitmap table according to given memory range.\r
351\r
352 @param[in] Address Memory address to unset from.\r
353 @param[in] NumberOfPages Number of pages to unset guard.\r
354\r
355 @return VOID\r
356**/\r
357VOID\r
358EFIAPI\r
359ClearGuardedMemoryBits (\r
360 IN EFI_PHYSICAL_ADDRESS Address,\r
361 IN UINTN NumberOfPages\r
362 )\r
363{\r
364 UINT64 *BitMap;\r
365 UINTN Bits;\r
366 UINTN BitsToUnitEnd;\r
367\r
368 while (NumberOfPages > 0) {\r
369 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
370 ASSERT (BitMap != NULL);\r
371\r
372 if (NumberOfPages > BitsToUnitEnd) {\r
373 // Cross map unit\r
374 Bits = BitsToUnitEnd;\r
375 } else {\r
376 Bits = NumberOfPages;\r
377 }\r
378\r
379 ClearBits (Address, Bits, BitMap);\r
380\r
381 NumberOfPages -= Bits;\r
382 Address += EFI_PAGES_TO_SIZE (Bits);\r
383 }\r
384}\r
385\r
386/**\r
387 Retrieve corresponding bits in bitmap table according to given memory range.\r
388\r
389 @param[in] Address Memory address to retrieve from.\r
390 @param[in] NumberOfPages Number of pages to retrieve.\r
391\r
392 @return An integer containing the guarded memory bitmap.\r
393**/\r
394UINTN\r
395GetGuardedMemoryBits (\r
396 IN EFI_PHYSICAL_ADDRESS Address,\r
397 IN UINTN NumberOfPages\r
398 )\r
399{\r
400 UINT64 *BitMap;\r
401 UINTN Bits;\r
402 UINTN Result;\r
403 UINTN Shift;\r
404 UINTN BitsToUnitEnd;\r
405\r
406 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);\r
407\r
408 Result = 0;\r
409 Shift = 0;\r
410 while (NumberOfPages > 0) {\r
411 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);\r
412\r
413 if (NumberOfPages > BitsToUnitEnd) {\r
414 // Cross map unit\r
415 Bits = BitsToUnitEnd;\r
416 } else {\r
417 Bits = NumberOfPages;\r
418 }\r
419\r
420 if (BitMap != NULL) {\r
421 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);\r
422 }\r
423\r
424 Shift += Bits;\r
425 NumberOfPages -= Bits;\r
426 Address += EFI_PAGES_TO_SIZE (Bits);\r
427 }\r
428\r
429 return Result;\r
430}\r
431\r
432/**\r
433 Get bit value in bitmap table for the given address.\r
434\r
435 @param[in] Address The address to retrieve for.\r
436\r
437 @return 1 or 0.\r
438**/\r
439UINTN\r
440EFIAPI\r
441GetGuardMapBit (\r
442 IN EFI_PHYSICAL_ADDRESS Address\r
443 )\r
444{\r
445 UINT64 *GuardMap;\r
446\r
447 FindGuardedMemoryMap (Address, FALSE, &GuardMap);\r
448 if (GuardMap != NULL) {\r
449 if (RShiftU64 (*GuardMap,\r
450 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {\r
451 return 1;\r
452 }\r
453 }\r
454\r
455 return 0;\r
456}\r
457\r
458/**\r
459 Set the bit in bitmap table for the given address.\r
460\r
461 @param[in] Address The address to set for.\r
462\r
463 @return VOID.\r
464**/\r
465VOID\r
466EFIAPI\r
467SetGuardMapBit (\r
468 IN EFI_PHYSICAL_ADDRESS Address\r
469 )\r
470{\r
471 UINT64 *GuardMap;\r
472 UINT64 BitMask;\r
473\r
474 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
475 if (GuardMap != NULL) {\r
476 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
477 *GuardMap |= BitMask;\r
478 }\r
479}\r
480\r
481/**\r
482 Clear the bit in bitmap table for the given address.\r
483\r
484 @param[in] Address The address to clear for.\r
485\r
486 @return VOID.\r
487**/\r
488VOID\r
489EFIAPI\r
490ClearGuardMapBit (\r
491 IN EFI_PHYSICAL_ADDRESS Address\r
492 )\r
493{\r
494 UINT64 *GuardMap;\r
495 UINT64 BitMask;\r
496\r
497 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
498 if (GuardMap != NULL) {\r
499 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
500 *GuardMap &= ~BitMask;\r
501 }\r
502}\r
503\r
504/**\r
505 Check to see if the page at the given address is a Guard page or not.\r
506\r
507 @param[in] Address The address to check for.\r
508\r
509 @return TRUE The page at Address is a Guard page.\r
510 @return FALSE The page at Address is not a Guard page.\r
511**/\r
512BOOLEAN\r
513EFIAPI\r
514IsGuardPage (\r
515 IN EFI_PHYSICAL_ADDRESS Address\r
516)\r
517{\r
518 UINTN BitMap;\r
519\r
520 //\r
521 // There must be at least one guarded page before and/or after given\r
522 // address if it's a Guard page. The bitmap pattern should be one of\r
523 // 001, 100 and 101\r
524 //\r
525 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);\r
526 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
527}\r
528\r
529/**\r
530 Check to see if the page at the given address is a head Guard page or not.\r
531\r
532 @param[in] Address The address to check for.\r
533\r
534 @return TRUE The page at Address is a head Guard page.\r
535 @return FALSE The page at Address is not a head Guard page.\r
536**/\r
537BOOLEAN\r
538EFIAPI\r
539IsHeadGuard (\r
540 IN EFI_PHYSICAL_ADDRESS Address\r
541 )\r
542{\r
543 return (GetGuardedMemoryBits (Address, 2) == BIT1);\r
544}\r
545\r
546/**\r
547 Check to see if the page at the given address is a tail Guard page or not.\r
548\r
549 @param[in] Address The address to check for.\r
550\r
551 @return TRUE The page at Address is a tail Guard page.\r
552 @return FALSE The page at Address is not a tail Guard page.\r
553**/\r
554BOOLEAN\r
555EFIAPI\r
556IsTailGuard (\r
557 IN EFI_PHYSICAL_ADDRESS Address\r
558 )\r
559{\r
560 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);\r
561}\r
562\r
563/**\r
564 Check to see if the page at the given address is guarded or not.\r
565\r
566 @param[in] Address The address to check for.\r
567\r
568 @return TRUE The page at Address is guarded.\r
569 @return FALSE The page at Address is not guarded.\r
570**/\r
571BOOLEAN\r
572EFIAPI\r
573IsMemoryGuarded (\r
574 IN EFI_PHYSICAL_ADDRESS Address\r
575 )\r
576{\r
577 return (GetGuardMapBit (Address) == 1);\r
578}\r
579\r
580/**\r
581 Set the page at the given address to be a Guard page.\r
582\r
583 This is done by changing the page table attribute to be NOT PRSENT.\r
584\r
585 @param[in] BaseAddress Page address to Guard at.\r
586\r
587 @return VOID.\r
588**/\r
589VOID\r
590EFIAPI\r
591SetGuardPage (\r
592 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
593 )\r
594{\r
595 if (mSmmMemoryAttribute != NULL) {\r
596 mOnGuarding = TRUE;\r
597 mSmmMemoryAttribute->SetMemoryAttributes (\r
598 mSmmMemoryAttribute,\r
599 BaseAddress,\r
600 EFI_PAGE_SIZE,\r
601 EFI_MEMORY_RP\r
602 );\r
603 mOnGuarding = FALSE;\r
604 }\r
605}\r
606\r
607/**\r
608 Unset the Guard page at the given address to the normal memory.\r
609\r
610 This is done by changing the page table attribute to be PRSENT.\r
611\r
612 @param[in] BaseAddress Page address to Guard at.\r
613\r
614 @return VOID.\r
615**/\r
616VOID\r
617EFIAPI\r
618UnsetGuardPage (\r
619 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
620 )\r
621{\r
622 if (mSmmMemoryAttribute != NULL) {\r
623 mOnGuarding = TRUE;\r
624 mSmmMemoryAttribute->ClearMemoryAttributes (\r
625 mSmmMemoryAttribute,\r
626 BaseAddress,\r
627 EFI_PAGE_SIZE,\r
628 EFI_MEMORY_RP\r
629 );\r
630 mOnGuarding = FALSE;\r
631 }\r
632}\r
633\r
634/**\r
635 Check to see if the memory at the given address should be guarded or not.\r
636\r
637 @param[in] MemoryType Memory type to check.\r
638 @param[in] AllocateType Allocation type to check.\r
639 @param[in] PageOrPool Indicate a page allocation or pool allocation.\r
640\r
641\r
642 @return TRUE The given type of memory should be guarded.\r
643 @return FALSE The given type of memory should not be guarded.\r
644**/\r
645BOOLEAN\r
646IsMemoryTypeToGuard (\r
647 IN EFI_MEMORY_TYPE MemoryType,\r
648 IN EFI_ALLOCATE_TYPE AllocateType,\r
649 IN UINT8 PageOrPool\r
650 )\r
651{\r
652 UINT64 TestBit;\r
653 UINT64 ConfigBit;\r
654\r
655 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0\r
656 || mOnGuarding\r
657 || AllocateType == AllocateAddress) {\r
658 return FALSE;\r
659 }\r
660\r
661 ConfigBit = 0;\r
662 if ((PageOrPool & GUARD_HEAP_TYPE_POOL) != 0) {\r
663 ConfigBit |= PcdGet64 (PcdHeapGuardPoolType);\r
664 }\r
665\r
666 if ((PageOrPool & GUARD_HEAP_TYPE_PAGE) != 0) {\r
667 ConfigBit |= PcdGet64 (PcdHeapGuardPageType);\r
668 }\r
669\r
670 if (MemoryType == EfiRuntimeServicesData ||\r
671 MemoryType == EfiRuntimeServicesCode) {\r
672 TestBit = LShiftU64 (1, MemoryType);\r
673 } else if (MemoryType == EfiMaxMemoryType) {\r
674 TestBit = (UINT64)-1;\r
675 } else {\r
676 TestBit = 0;\r
677 }\r
678\r
679 return ((ConfigBit & TestBit) != 0);\r
680}\r
681\r
682/**\r
683 Check to see if the pool at the given address should be guarded or not.\r
684\r
685 @param[in] MemoryType Pool type to check.\r
686\r
687\r
688 @return TRUE The given type of pool should be guarded.\r
689 @return FALSE The given type of pool should not be guarded.\r
690**/\r
691BOOLEAN\r
692IsPoolTypeToGuard (\r
693 IN EFI_MEMORY_TYPE MemoryType\r
694 )\r
695{\r
696 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,\r
697 GUARD_HEAP_TYPE_POOL);\r
698}\r
699\r
700/**\r
701 Check to see if the page at the given address should be guarded or not.\r
702\r
703 @param[in] MemoryType Page type to check.\r
704 @param[in] AllocateType Allocation type to check.\r
705\r
706 @return TRUE The given type of page should be guarded.\r
707 @return FALSE The given type of page should not be guarded.\r
708**/\r
709BOOLEAN\r
710IsPageTypeToGuard (\r
711 IN EFI_MEMORY_TYPE MemoryType,\r
712 IN EFI_ALLOCATE_TYPE AllocateType\r
713 )\r
714{\r
715 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);\r
716}\r
717\r
718/**\r
719 Check to see if the heap guard is enabled for page and/or pool allocation.\r
720\r
721 @return TRUE/FALSE.\r
722**/\r
723BOOLEAN\r
724IsHeapGuardEnabled (\r
725 VOID\r
726 )\r
727{\r
728 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,\r
729 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);\r
730}\r
731\r
732/**\r
733 Set head Guard and tail Guard for the given memory range.\r
734\r
735 @param[in] Memory Base address of memory to set guard for.\r
736 @param[in] NumberOfPages Memory size in pages.\r
737\r
738 @return VOID.\r
739**/\r
740VOID\r
741SetGuardForMemory (\r
742 IN EFI_PHYSICAL_ADDRESS Memory,\r
743 IN UINTN NumberOfPages\r
744 )\r
745{\r
746 EFI_PHYSICAL_ADDRESS GuardPage;\r
747\r
748 //\r
749 // Set tail Guard\r
750 //\r
751 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
752 if (!IsGuardPage (GuardPage)) {\r
753 SetGuardPage (GuardPage);\r
754 }\r
755\r
756 // Set head Guard\r
757 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
758 if (!IsGuardPage (GuardPage)) {\r
759 SetGuardPage (GuardPage);\r
760 }\r
761\r
762 //\r
763 // Mark the memory range as Guarded\r
764 //\r
765 SetGuardedMemoryBits (Memory, NumberOfPages);\r
766}\r
767\r
/**
  Unset head Guard and tail Guard for the given memory range.

  Shared Guard pages (Guards also serving an adjacent block) are kept;
  in partial-free situations one page of the freed range is converted
  into a new Guard.

  @param[in]  Memory         Base address of memory to unset guard for.
  @param[in]  NumberOfPages  Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
860\r
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE(SizeRequested, 8);
  }

  //
  // Place the allocation at the top of the free block (nearest the tail).
  //
  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // No enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
921\r
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  //
  // Report the possibly widened/narrowed range back to the caller.
  //
  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
1023\r
1024/**\r
1025 Adjust the base and number of pages to really allocate according to Guard.\r
1026\r
1027 @param[in,out] Memory Base address of free memory.\r
1028 @param[in,out] NumberOfPages Size of memory to allocate.\r
1029\r
1030 @return VOID.\r
1031**/\r
1032VOID\r
1033AdjustMemoryA (\r
1034 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
1035 IN OUT UINTN *NumberOfPages\r
1036 )\r
1037{\r
1038 //\r
1039 // FindFreePages() has already taken the Guard into account. It's safe to\r
1040 // adjust the start address and/or number of pages here, to make sure that\r
1041 // the Guards are also "allocated".\r
1042 //\r
1043 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {\r
1044 // No tail Guard, add one.\r
1045 *NumberOfPages += 1;\r
1046 }\r
1047\r
1048 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {\r
1049 // No head Guard, add one.\r
1050 *Memory -= EFI_PAGE_SIZE;\r
1051 *NumberOfPages += 1;\r
1052 }\r
1053}\r
1054\r
1055/**\r
1056 Adjust the pool head position to make sure the Guard page is adjavent to\r
1057 pool tail or pool head.\r
1058\r
1059 @param[in] Memory Base address of memory allocated.\r
1060 @param[in] NoPages Number of pages actually allocated.\r
1061 @param[in] Size Size of memory requested.\r
1062 (plus pool head/tail overhead)\r
1063\r
1064 @return Address of pool head\r
1065**/\r
1066VOID *\r
1067AdjustPoolHeadA (\r
1068 IN EFI_PHYSICAL_ADDRESS Memory,\r
1069 IN UINTN NoPages,\r
1070 IN UINTN Size\r
1071 )\r
1072{\r
c44218e5 1073 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1074 //\r
1075 // Pool head is put near the head Guard\r
1076 //\r
1077 return (VOID *)(UINTN)Memory;\r
1078 }\r
1079\r
1080 //\r
1081 // Pool head is put near the tail Guard\r
1082 //\r
c44218e5 1083 Size = ALIGN_VALUE (Size, 8);\r
e63da9f0
JW
1084 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);\r
1085}\r
1086\r
1087/**\r
1088 Get the page base address according to pool head address.\r
1089\r
1090 @param[in] Memory Head address of pool to free.\r
1091\r
1092 @return Address of pool head.\r
1093**/\r
1094VOID *\r
1095AdjustPoolHeadF (\r
1096 IN EFI_PHYSICAL_ADDRESS Memory\r
1097 )\r
1098{\r
c44218e5 1099 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1100 //\r
1101 // Pool head is put near the head Guard\r
1102 //\r
1103 return (VOID *)(UINTN)Memory;\r
1104 }\r
1105\r
1106 //\r
1107 // Pool head is put near the tail Guard\r
1108 //\r
1109 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);\r
1110}\r
1111\r
/**
  Helper function of memory allocation with Guard pages.

  Walks the free page list from the back, looking for a block large enough
  for the requested pages plus any Guard pages that cannot be shared with
  neighboring allocations.

  @param  FreePageList           The free page node.
  @param  NumberOfPages          Number of pages to be allocated.
  @param  MaxAddress             Request to allocate memory below this address.
  @param  MemoryType             Type of memory requested.

  @return Memory address of allocated pages, or (UINTN)-1 on failure.
**/
UINTN
InternalAllocMaxAddressWithGuard (
  IN OUT LIST_ENTRY           *FreePageList,
  IN     UINTN                NumberOfPages,
  IN     UINTN                MaxAddress,
  IN     EFI_MEMORY_TYPE      MemoryType

  )
{
  LIST_ENTRY      *Node;
  FREE_PAGE_LIST  *Pages;
  UINTN           PagesToAlloc;
  UINTN           HeadGuard;
  UINTN           TailGuard;
  UINTN           Address;

  for (Node = FreePageList->BackLink; Node != FreePageList;
       Node = Node->BackLink) {
    Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
    if (Pages->NumberOfPages >= NumberOfPages &&
        (UINTN)Pages + EFI_PAGES_TO_SIZE (NumberOfPages) - 1 <= MaxAddress) {

      //
      // We may need 1 or 2 more pages for Guard. Check it out.
      //
      PagesToAlloc = NumberOfPages;
      TailGuard = (UINTN)Pages + EFI_PAGES_TO_SIZE (Pages->NumberOfPages);
      if (!IsGuardPage (TailGuard)) {
        //
        // Add one if no Guard at the end of current free memory block.
        // TailGuard == 0 below means "new tail Guard page was reserved".
        //
        PagesToAlloc += 1;
        TailGuard     = 0;
      }

      HeadGuard = (UINTN)Pages +
                  EFI_PAGES_TO_SIZE (Pages->NumberOfPages - PagesToAlloc) -
                  EFI_PAGE_SIZE;
      if (!IsGuardPage (HeadGuard)) {
        //
        // Add one if no Guard at the page before the address to allocate
        // HeadGuard == 0 below means "new head Guard page was reserved".
        //
        PagesToAlloc += 1;
        HeadGuard     = 0;
      }

      if (Pages->NumberOfPages < PagesToAlloc) {
        // Not enough space to allocate memory with Guards? Try next block.
        continue;
      }

      Address = InternalAllocPagesOnOneNode (Pages, PagesToAlloc, MaxAddress);
      ConvertSmmMemoryMapEntry(MemoryType, Address, PagesToAlloc, FALSE);
      CoreFreeMemoryMapStack();
      if (HeadGuard == 0) {
        // Don't pass the Guard page to user.
        Address += EFI_PAGE_SIZE;
      }
      SetGuardForMemory (Address, NumberOfPages);
      return Address;
    }
  }

  return (UINTN)(-1);
}
1187\r
1188/**\r
1189 Helper function of memory free with Guard pages.\r
1190\r
1191 @param[in] Memory Base address of memory being freed.\r
1192 @param[in] NumberOfPages The number of pages to free.\r
1193 @param[in] AddRegion If this memory is new added region.\r
1194\r
1195 @retval EFI_NOT_FOUND Could not find the entry that covers the range.\r
1196 @retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or NumberOfPages is zero.\r
1197 @return EFI_SUCCESS Pages successfully freed.\r
1198**/\r
1199EFI_STATUS\r
1200SmmInternalFreePagesExWithGuard (\r
1201 IN EFI_PHYSICAL_ADDRESS Memory,\r
1202 IN UINTN NumberOfPages,\r
1203 IN BOOLEAN AddRegion\r
1204 )\r
1205{\r
1206 EFI_PHYSICAL_ADDRESS MemoryToFree;\r
1207 UINTN PagesToFree;\r
1208\r
7823611c
JW
1209 if (((Memory & EFI_PAGE_MASK) != 0) || (Memory == 0) || (NumberOfPages == 0)) {\r
1210 return EFI_INVALID_PARAMETER;\r
1211 }\r
1212\r
e63da9f0
JW
1213 MemoryToFree = Memory;\r
1214 PagesToFree = NumberOfPages;\r
1215\r
1216 AdjustMemoryF (&MemoryToFree, &PagesToFree);\r
1217 UnsetGuardForMemory (Memory, NumberOfPages);\r
38d870fc
JW
1218 if (PagesToFree == 0) {\r
1219 return EFI_SUCCESS;\r
1220 }\r
e63da9f0
JW
1221\r
1222 return SmmInternalFreePagesEx (MemoryToFree, PagesToFree, AddRegion);\r
1223}\r
1224\r
/**
  Set all Guard pages which cannot be set during the non-SMM mode time.

  Walks the multi-level guarded-memory bitmap (rooted at mGuardedMemoryMap)
  and calls SetGuardPage() on every page that borders a guarded region: a
  0->1 bit transition marks a head Guard (the page just below the guarded
  range), a 1->0 transition marks a tail Guard (the page just above it).
**/
VOID
SetAllGuardPages (
  VOID
  )
{
  UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Max index (mask) per level
  UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];     // Address shift per level
  UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Current index per level
  UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Table pointer per level
  UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH]; // Base address per level
  UINT64 TableEntry;
  UINT64 Address;
  UINT64 GuardPage;
  INTN Level;
  UINTN Index;
  BOOLEAN OnGuarding;   // TRUE while inside a run of guarded ('1') pages

  //
  // Nothing to do if the map was never populated or its depth is invalid.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);
  SetMem (Indices, sizeof(Indices), 0);

  //
  // Start at the shallowest level actually in use; deeper tables are
  // entered on demand as non-zero entries are found.
  //
  Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address = 0;
  OnGuarding = FALSE;

  DEBUG_CODE (
    DumpGuardedMemoryBitmap ();
  );

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {
      //
      // Current table exhausted; pop back up one level.
      //
      Tables[Level] = 0;
      Level -= 1;
    } else {

      TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];
      Address = Addresses[Level];

      if (TableEntry == 0) {

        //
        // Whole sub-range unguarded; any open guarded run ends outside it.
        //
        OnGuarding = FALSE;

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Non-leaf entry: descend into the next-level table.
        //
        Level += 1;
        Tables[Level] = TableEntry;
        Addresses[Level] = Address;
        Indices[Level] = 0;

        continue;

      } else {

        //
        // Leaf bitmap: scan bit by bit, one page per bit, LSB first.
        //
        Index = 0;
        while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {
          if ((TableEntry & 1) == 1) {
            if (OnGuarding) {
              GuardPage = 0;
            } else {
              // 0->1 edge: the page below this one is a head Guard.
              GuardPage = Address - EFI_PAGE_SIZE;
            }
            OnGuarding = TRUE;
          } else {
            if (OnGuarding) {
              // 1->0 edge: this page is a tail Guard.
              GuardPage = Address;
            } else {
              GuardPage = 0;
            }
            OnGuarding = FALSE;
          }

          if (GuardPage != 0) {
            SetGuardPage (GuardPage);
          }

          //
          // All remaining bits are zero; a potential tail Guard at the next
          // '0' has already been handled above, so stop early.
          //
          if (TableEntry == 0) {
            break;
          }

          TableEntry = RShiftU64 (TableEntry, 1);
          Address += EFI_PAGE_SIZE;
          Index += 1;
        }
      }
    }

    //
    // Rose above the starting level: the whole map has been walked.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at this level and refresh its base address.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1334\r
1335/**\r
1336 Hook function used to set all Guard pages after entering SMM mode.\r
1337**/\r
1338VOID\r
1339SmmEntryPointMemoryManagementHook (\r
1340 VOID\r
1341 )\r
1342{\r
1343 EFI_STATUS Status;\r
1344\r
1345 if (mSmmMemoryAttribute == NULL) {\r
1346 Status = SmmLocateProtocol (\r
1347 &gEdkiiSmmMemoryAttributeProtocolGuid,\r
1348 NULL,\r
1349 (VOID **)&mSmmMemoryAttribute\r
1350 );\r
1351 if (!EFI_ERROR(Status)) {\r
1352 SetAllGuardPages ();\r
1353 }\r
1354 }\r
1355}\r
1356\r
1357/**\r
1358 Helper function to convert a UINT64 value in binary to a string.\r
1359\r
1360 @param[in] Value Value of a UINT64 integer.\r
1361 @param[out] BinString String buffer to contain the conversion result.\r
1362\r
1363 @return VOID.\r
1364**/\r
1365VOID\r
1366Uint64ToBinString (\r
1367 IN UINT64 Value,\r
1368 OUT CHAR8 *BinString\r
1369 )\r
1370{\r
1371 UINTN Index;\r
1372\r
1373 if (BinString == NULL) {\r
1374 return;\r
1375 }\r
1376\r
1377 for (Index = 64; Index > 0; --Index) {\r
1378 BinString[Index - 1] = '0' + (Value & 1);\r
1379 Value = RShiftU64 (Value, 1);\r
1380 }\r
1381 BinString[64] = '\0';\r
1382}\r
1383\r
/**
  Dump the guarded memory bit map.

  Walks the multi-level guarded-memory bitmap the same way SetAllGuardPages
  does and prints each leaf entry as a 64-digit binary string, collapsing
  consecutive all-zero entries into a single "..." line.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  )
{
  UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Max index (mask) per level
  UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];     // Address shift per level
  UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Current index per level
  UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];    // Table pointer per level
  UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH]; // Base address per level
  UINT64 TableEntry;
  UINT64 Address;
  INTN Level;
  UINTN RepeatZero;   // Count of consecutive all-zero leaf entries seen
  CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];
  CHAR8 *Ruler1;
  CHAR8 *Ruler2;

  //
  // Nothing to dump if the map was never populated or its depth is invalid.
  //
  if (mGuardedMemoryMap == 0 ||
      mMapLevel == 0 ||
      mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {
    return;
  }

  //
  // Ruler lines labelling the 64 bit positions (nibble index above, hex
  // digit of the bit position below).
  //
  Ruler1 = "               3               2               1               0";
  Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";

  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="
                                  " Guarded Memory Bitmap "
                                  "==============================\r\n"));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler1));
  DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "                  %a\r\n", Ruler2));

  CopyMem (Entries, mLevelMask, sizeof (Entries));
  CopyMem (Shifts, mLevelShift, sizeof (Shifts));

  SetMem (Indices, sizeof(Indices), 0);
  SetMem (Tables, sizeof(Tables), 0);
  SetMem (Addresses, sizeof(Addresses), 0);

  //
  // Start at the shallowest level actually in use.
  //
  Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
  Tables[Level] = mGuardedMemoryMap;
  Address = 0;
  RepeatZero = 0;

  while (TRUE) {
    if (Indices[Level] > Entries[Level]) {

      //
      // Current table exhausted; pop up one level and print a separator.
      //
      Tables[Level] = 0;
      Level -= 1;
      RepeatZero = 0;

      DEBUG ((
        HEAP_GUARD_DEBUG_LEVEL,
        "========================================="
        "=========================================\r\n"
        ));

    } else {

      TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];
      Address = Addresses[Level];

      if (TableEntry == 0) {

        if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {
          //
          // All-zero leaf: print the first one, print "..." for the second,
          // and silently skip the rest of the run.
          //
          if (RepeatZero == 0) {
            Uint64ToBinString(TableEntry, String);
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));
          } else if (RepeatZero == 1) {
            DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "...             : ...\r\n"));
          }
          RepeatZero += 1;
        }

      } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {

        //
        // Non-leaf entry: descend into the next-level table.
        //
        Level += 1;
        Tables[Level] = TableEntry;
        Addresses[Level] = Address;
        Indices[Level] = 0;
        RepeatZero = 0;

        continue;

      } else {

        //
        // Non-zero leaf: print its 64 page bits in binary.
        //
        RepeatZero = 0;
        Uint64ToBinString(TableEntry, String);
        DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));

      }
    }

    //
    // Rose above the starting level: the whole map has been dumped.
    //
    if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {
      break;
    }

    //
    // Advance to the next entry at this level and refresh its base address.
    //
    Indices[Level] += 1;
    Address = (Level == 0) ? 0 : Addresses[Level - 1];
    Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);

  }
}
1492\r
1493/**\r
1494 Debug function used to verify if the Guard page is well set or not.\r
1495\r
1496 @param[in] BaseAddress Address of memory to check.\r
1497 @param[in] NumberOfPages Size of memory in pages.\r
1498\r
1499 @return TRUE The head Guard and tail Guard are both well set.\r
1500 @return FALSE The head Guard and/or tail Guard are not well set.\r
1501**/\r
1502BOOLEAN\r
1503VerifyMemoryGuard (\r
1504 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
1505 IN UINTN NumberOfPages\r
1506 )\r
1507{\r
1508 EFI_STATUS Status;\r
1509 UINT64 Attribute;\r
1510 EFI_PHYSICAL_ADDRESS Address;\r
1511\r
1512 if (mSmmMemoryAttribute == NULL) {\r
1513 return TRUE;\r
1514 }\r
1515\r
1516 Attribute = 0;\r
1517 Address = BaseAddress - EFI_PAGE_SIZE;\r
1518 Status = mSmmMemoryAttribute->GetMemoryAttributes (\r
1519 mSmmMemoryAttribute,\r
1520 Address,\r
1521 EFI_PAGE_SIZE,\r
1522 &Attribute\r
1523 );\r
1524 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {\r
1525 DEBUG ((DEBUG_ERROR, "Head Guard is not set at: %016lx (%016lX)!!!\r\n",\r
1526 Address, Attribute));\r
1527 DumpGuardedMemoryBitmap ();\r
1528 return FALSE;\r
1529 }\r
1530\r
1531 Attribute = 0;\r
1532 Address = BaseAddress + EFI_PAGES_TO_SIZE (NumberOfPages);\r
1533 Status = mSmmMemoryAttribute->GetMemoryAttributes (\r
1534 mSmmMemoryAttribute,\r
1535 Address,\r
1536 EFI_PAGE_SIZE,\r
1537 &Attribute\r
1538 );\r
1539 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {\r
1540 DEBUG ((DEBUG_ERROR, "Tail Guard is not set at: %016lx (%016lX)!!!\r\n",\r
1541 Address, Attribute));\r
1542 DumpGuardedMemoryBitmap ();\r
1543 return FALSE;\r
1544 }\r
1545\r
1546 return TRUE;\r
1547}\r
1548\r