MdeModulePkg/PiSmmCore: fix #PF caused by freeing read-only memory
[mirror_edk2.git] / MdeModulePkg / Core / PiSmmCore / HeapGuard.c
CommitLineData
e63da9f0
JW
1/** @file\r
2 UEFI Heap Guard functions.\r
3\r
8b13bca9 4Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>\r
e63da9f0
JW
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
#include "HeapGuard.h"

//
// Global to avoid infinite reentrance of memory allocation when updating
// page table attributes, which may need allocating pages for new PDE/PTE.
// Checked by IsMemoryTypeToGuard() so that allocations made while a Guard
// page is being set/unset are never themselves guarded.
//
GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;

//
// Pointer to table tracking the Guarded memory with bitmap, in which '1'
// is used to indicate memory guarded. '0' might be free memory or Guard
// page itself, depending on status of memory adjacent to it.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;

//
// Current depth level of map table pointed by mGuardedMemoryMap.
// mMapLevel must be initialized at least by 1. It will be automatically
// updated according to the address of memory just tracked.
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;

//
// Shift and mask for each level of map table
//
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]
                                    = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;

//
// SMM memory attribute protocol, used to flip page attributes (e.g.
// EFI_MEMORY_RP) when Guard pages are set or unset. May be NULL until the
// protocol is located.
//
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *mSmmMemoryAttribute = NULL;
49\r
/**
  Set corresponding bits in bitmap table to 1 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID
**/
STATIC
VOID
SetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // The run of bits to set may span several 64-bit map words. Split it into
  // three parts: Msbs bits that finish off the first (partial) word, Qwords
  // whole words in the middle, and Lsbs bits at the start of the last word.
  // The '%' on Msbs makes it 0 when StartBit is 0, so a run starting on a
  // word boundary is handled entirely by the Qwords/Lsbs parts.
  //
  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    //
    // Run fits inside the first word; no middle or trailing part.
    //
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap  += 1;
  }

  if (Qwords > 0) {
    //
    // Whole words are set in one shot with an all-ones pattern.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,
              (UINT64)-1);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap |= (LShiftU64 (1, Lsbs) - 1);
  }
}
102\r
/**
  Set corresponding bits in bitmap table to 0 according to the address.

  @param[in]  Address    Start address to set for.
  @param[in]  BitNumber  Number of bits to set.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return VOID.
**/
STATIC
VOID
ClearBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           Lsbs;
  UINTN           Qwords;
  UINTN           Msbs;
  UINTN           StartBit;
  UINTN           EndBit;

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // Mirror of SetBits(): split the run into Msbs bits in the first (partial)
  // word, Qwords whole middle words, and Lsbs bits in the last word. The '%'
  // on Msbs yields 0 for a run starting exactly on a word boundary.
  //
  if ((StartBit + BitNumber) >= GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs    = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %
              GUARDED_HEAP_MAP_ENTRY_BITS;
    Lsbs    = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
    Qwords  = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs    = BitNumber;
    Lsbs    = 0;
    Qwords  = 0;
  }

  if (Msbs > 0) {
    *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);
    BitMap += 1;
  }

  if (Qwords > 0) {
    //
    // Whole middle words are simply zeroed.
    //
    SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);
    BitMap += Qwords;
  }

  if (Lsbs > 0) {
    *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);
  }
}
154\r
/**
  Get corresponding bits in bitmap table according to the address.

  The value of bit 0 corresponds to the status of memory at given Address.
  No more than 64 bits can be retrieved in one call.

  @param[in]  Address    Start address to retrieve bits for.
  @param[in]  BitNumber  Number of bits to get.
  @param[in]  BitMap     Pointer to bitmap which covers the Address.

  @return An integer containing the bits information.
**/
STATIC
UINT64
GetBits (
  IN EFI_PHYSICAL_ADDRESS    Address,
  IN UINTN                   BitNumber,
  IN UINT64                  *BitMap
  )
{
  UINTN           StartBit;
  UINTN           EndBit;
  UINTN           Lsbs;
  UINTN           Msbs;
  UINT64          Result;

  ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);

  StartBit  = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
  EndBit    = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;

  //
  // The requested bits may cross into the next 64-bit map word: Msbs come
  // from the first word (starting at StartBit), Lsbs from the bottom of the
  // following word.
  //
  if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {
    Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;
    Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;
  } else {
    Msbs = BitNumber;
    Lsbs = 0;
  }

  //
  // Special-case a full aligned word: the mask expression below would need
  // LShiftU64 with a shift count of 64, which must be avoided.
  //
  if (StartBit == 0 && BitNumber == GUARDED_HEAP_MAP_ENTRY_BITS) {
    Result = *BitMap;
  } else {
    Result    = RShiftU64((*BitMap), StartBit) & (LShiftU64(1, Msbs) - 1);
    if (Lsbs > 0) {
      BitMap  += 1;
      Result  |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);
    }
  }

  return Result;
}
206\r
207/**\r
208 Helper function to allocate pages without Guard for internal uses.\r
209\r
210 @param[in] Pages Page number.\r
211\r
212 @return Address of memory allocated.\r
213**/\r
214VOID *\r
215PageAlloc (\r
216 IN UINTN Pages\r
217 )\r
218{\r
219 EFI_STATUS Status;\r
220 EFI_PHYSICAL_ADDRESS Memory;\r
221\r
222 Status = SmmInternalAllocatePages (AllocateAnyPages, EfiRuntimeServicesData,\r
223 Pages, &Memory, FALSE);\r
224 if (EFI_ERROR (Status)) {\r
225 Memory = 0;\r
226 }\r
227\r
228 return (VOID *)(UINTN)Memory;\r
229}\r
230\r
/**
  Locate the pointer of bitmap from the guarded memory bitmap tables, which
  covers the given Address.

  @param[in]  Address       Start address to search the bitmap for.
  @param[in]  AllocMapUnit  Flag to indicate memory allocation for the table.
  @param[out] BitMap        Pointer to bitmap which covers the Address.
                            Set to NULL if the range is untracked and
                            AllocMapUnit is FALSE.

  @return The bit number from given Address to the end of current map table.
**/
UINTN
FindGuardedMemoryMap (
  IN  EFI_PHYSICAL_ADDRESS    Address,
  IN  BOOLEAN                 AllocMapUnit,
  OUT UINT64                  **BitMap
  )
{
  UINTN                   Level;
  UINT64                  *GuardMap;
  UINT64                  MapMemory;
  UINTN                   Index;
  UINTN                   Size;
  UINTN                   BitsToUnitEnd;

  //
  // Adjust current map table depth according to the address to access.
  // If Address has set bits above what the current depth can index, grow
  // the table upward by inserting new root levels. The old root becomes
  // entry 0 of each new root (it covers the low addresses). Growth only
  // happens when the caller allows allocation (AllocMapUnit).
  //
  while (AllocMapUnit &&
         mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH &&
         RShiftU64 (
           Address,
           mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]
           ) != 0) {

    if (mGuardedMemoryMap != 0) {
      Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)
             * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);

      //
      // Hook the old root in as the first entry of the new, wider root.
      //
      *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;
      mGuardedMemoryMap = MapMemory;
    }

    mMapLevel++;

  }

  //
  // Walk down the table, allocating missing intermediate levels on demand
  // (or bailing out with NULL when lookup-only).
  //
  GuardMap = &mGuardedMemoryMap;
  for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;
       Level < GUARDED_HEAP_MAP_TABLE_DEPTH;
       ++Level) {

    if (*GuardMap == 0) {
      if (!AllocMapUnit) {
        GuardMap = NULL;
        break;
      }

      Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;
      MapMemory = (UINT64)(UINTN)PageAlloc (EFI_SIZE_TO_PAGES (Size));
      ASSERT (MapMemory != 0);

      SetMem ((VOID *)(UINTN)MapMemory, Size, 0);
      *GuardMap = MapMemory;
    }

    //
    // Index into the current level using this level's shift/mask, then
    // descend: the entry holds the (physical) address of the next level.
    //
    Index     = (UINTN)RShiftU64 (Address, mLevelShift[Level]);
    Index    &= mLevelMask[Level];
    GuardMap  = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));

  }

  BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);
  *BitMap  = GuardMap;

  return BitsToUnitEnd;
}
311\r
312/**\r
313 Set corresponding bits in bitmap table to 1 according to given memory range.\r
314\r
315 @param[in] Address Memory address to guard from.\r
316 @param[in] NumberOfPages Number of pages to guard.\r
317\r
318 @return VOID\r
319**/\r
320VOID\r
321EFIAPI\r
322SetGuardedMemoryBits (\r
323 IN EFI_PHYSICAL_ADDRESS Address,\r
324 IN UINTN NumberOfPages\r
325 )\r
326{\r
327 UINT64 *BitMap;\r
328 UINTN Bits;\r
329 UINTN BitsToUnitEnd;\r
330\r
331 while (NumberOfPages > 0) {\r
332 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
333 ASSERT (BitMap != NULL);\r
334\r
335 if (NumberOfPages > BitsToUnitEnd) {\r
336 // Cross map unit\r
337 Bits = BitsToUnitEnd;\r
338 } else {\r
339 Bits = NumberOfPages;\r
340 }\r
341\r
342 SetBits (Address, Bits, BitMap);\r
343\r
344 NumberOfPages -= Bits;\r
345 Address += EFI_PAGES_TO_SIZE (Bits);\r
346 }\r
347}\r
348\r
349/**\r
350 Clear corresponding bits in bitmap table according to given memory range.\r
351\r
352 @param[in] Address Memory address to unset from.\r
353 @param[in] NumberOfPages Number of pages to unset guard.\r
354\r
355 @return VOID\r
356**/\r
357VOID\r
358EFIAPI\r
359ClearGuardedMemoryBits (\r
360 IN EFI_PHYSICAL_ADDRESS Address,\r
361 IN UINTN NumberOfPages\r
362 )\r
363{\r
364 UINT64 *BitMap;\r
365 UINTN Bits;\r
366 UINTN BitsToUnitEnd;\r
367\r
368 while (NumberOfPages > 0) {\r
369 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
370 ASSERT (BitMap != NULL);\r
371\r
372 if (NumberOfPages > BitsToUnitEnd) {\r
373 // Cross map unit\r
374 Bits = BitsToUnitEnd;\r
375 } else {\r
376 Bits = NumberOfPages;\r
377 }\r
378\r
379 ClearBits (Address, Bits, BitMap);\r
380\r
381 NumberOfPages -= Bits;\r
382 Address += EFI_PAGES_TO_SIZE (Bits);\r
383 }\r
384}\r
385\r
386/**\r
387 Retrieve corresponding bits in bitmap table according to given memory range.\r
388\r
389 @param[in] Address Memory address to retrieve from.\r
390 @param[in] NumberOfPages Number of pages to retrieve.\r
391\r
392 @return An integer containing the guarded memory bitmap.\r
393**/\r
394UINTN\r
395GetGuardedMemoryBits (\r
396 IN EFI_PHYSICAL_ADDRESS Address,\r
397 IN UINTN NumberOfPages\r
398 )\r
399{\r
400 UINT64 *BitMap;\r
401 UINTN Bits;\r
402 UINTN Result;\r
403 UINTN Shift;\r
404 UINTN BitsToUnitEnd;\r
405\r
406 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);\r
407\r
408 Result = 0;\r
409 Shift = 0;\r
410 while (NumberOfPages > 0) {\r
411 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);\r
412\r
413 if (NumberOfPages > BitsToUnitEnd) {\r
414 // Cross map unit\r
415 Bits = BitsToUnitEnd;\r
416 } else {\r
417 Bits = NumberOfPages;\r
418 }\r
419\r
420 if (BitMap != NULL) {\r
421 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);\r
422 }\r
423\r
424 Shift += Bits;\r
425 NumberOfPages -= Bits;\r
426 Address += EFI_PAGES_TO_SIZE (Bits);\r
427 }\r
428\r
429 return Result;\r
430}\r
431\r
432/**\r
433 Get bit value in bitmap table for the given address.\r
434\r
435 @param[in] Address The address to retrieve for.\r
436\r
437 @return 1 or 0.\r
438**/\r
439UINTN\r
440EFIAPI\r
441GetGuardMapBit (\r
442 IN EFI_PHYSICAL_ADDRESS Address\r
443 )\r
444{\r
445 UINT64 *GuardMap;\r
446\r
447 FindGuardedMemoryMap (Address, FALSE, &GuardMap);\r
448 if (GuardMap != NULL) {\r
449 if (RShiftU64 (*GuardMap,\r
450 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {\r
451 return 1;\r
452 }\r
453 }\r
454\r
455 return 0;\r
456}\r
457\r
458/**\r
459 Set the bit in bitmap table for the given address.\r
460\r
461 @param[in] Address The address to set for.\r
462\r
463 @return VOID.\r
464**/\r
465VOID\r
466EFIAPI\r
467SetGuardMapBit (\r
468 IN EFI_PHYSICAL_ADDRESS Address\r
469 )\r
470{\r
471 UINT64 *GuardMap;\r
472 UINT64 BitMask;\r
473\r
474 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
475 if (GuardMap != NULL) {\r
476 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
477 *GuardMap |= BitMask;\r
478 }\r
479}\r
480\r
481/**\r
482 Clear the bit in bitmap table for the given address.\r
483\r
484 @param[in] Address The address to clear for.\r
485\r
486 @return VOID.\r
487**/\r
488VOID\r
489EFIAPI\r
490ClearGuardMapBit (\r
491 IN EFI_PHYSICAL_ADDRESS Address\r
492 )\r
493{\r
494 UINT64 *GuardMap;\r
495 UINT64 BitMask;\r
496\r
497 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
498 if (GuardMap != NULL) {\r
499 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
500 *GuardMap &= ~BitMask;\r
501 }\r
502}\r
503\r
504/**\r
505 Check to see if the page at the given address is a Guard page or not.\r
506\r
507 @param[in] Address The address to check for.\r
508\r
509 @return TRUE The page at Address is a Guard page.\r
510 @return FALSE The page at Address is not a Guard page.\r
511**/\r
512BOOLEAN\r
513EFIAPI\r
514IsGuardPage (\r
515 IN EFI_PHYSICAL_ADDRESS Address\r
516)\r
517{\r
518 UINTN BitMap;\r
519\r
520 //\r
521 // There must be at least one guarded page before and/or after given\r
522 // address if it's a Guard page. The bitmap pattern should be one of\r
523 // 001, 100 and 101\r
524 //\r
525 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);\r
526 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
527}\r
528\r
529/**\r
530 Check to see if the page at the given address is a head Guard page or not.\r
531\r
532 @param[in] Address The address to check for.\r
533\r
534 @return TRUE The page at Address is a head Guard page.\r
535 @return FALSE The page at Address is not a head Guard page.\r
536**/\r
537BOOLEAN\r
538EFIAPI\r
539IsHeadGuard (\r
540 IN EFI_PHYSICAL_ADDRESS Address\r
541 )\r
542{\r
543 return (GetGuardedMemoryBits (Address, 2) == BIT1);\r
544}\r
545\r
546/**\r
547 Check to see if the page at the given address is a tail Guard page or not.\r
548\r
549 @param[in] Address The address to check for.\r
550\r
551 @return TRUE The page at Address is a tail Guard page.\r
552 @return FALSE The page at Address is not a tail Guard page.\r
553**/\r
554BOOLEAN\r
555EFIAPI\r
556IsTailGuard (\r
557 IN EFI_PHYSICAL_ADDRESS Address\r
558 )\r
559{\r
560 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);\r
561}\r
562\r
563/**\r
564 Check to see if the page at the given address is guarded or not.\r
565\r
566 @param[in] Address The address to check for.\r
567\r
568 @return TRUE The page at Address is guarded.\r
569 @return FALSE The page at Address is not guarded.\r
570**/\r
571BOOLEAN\r
572EFIAPI\r
573IsMemoryGuarded (\r
574 IN EFI_PHYSICAL_ADDRESS Address\r
575 )\r
576{\r
577 return (GetGuardMapBit (Address) == 1);\r
578}\r
579\r
580/**\r
581 Set the page at the given address to be a Guard page.\r
582\r
583 This is done by changing the page table attribute to be NOT PRSENT.\r
584\r
585 @param[in] BaseAddress Page address to Guard at.\r
586\r
587 @return VOID.\r
588**/\r
589VOID\r
590EFIAPI\r
591SetGuardPage (\r
592 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
593 )\r
594{\r
595 if (mSmmMemoryAttribute != NULL) {\r
596 mOnGuarding = TRUE;\r
597 mSmmMemoryAttribute->SetMemoryAttributes (\r
598 mSmmMemoryAttribute,\r
599 BaseAddress,\r
600 EFI_PAGE_SIZE,\r
601 EFI_MEMORY_RP\r
602 );\r
603 mOnGuarding = FALSE;\r
604 }\r
605}\r
606\r
607/**\r
608 Unset the Guard page at the given address to the normal memory.\r
609\r
610 This is done by changing the page table attribute to be PRSENT.\r
611\r
612 @param[in] BaseAddress Page address to Guard at.\r
613\r
614 @return VOID.\r
615**/\r
616VOID\r
617EFIAPI\r
618UnsetGuardPage (\r
619 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
620 )\r
621{\r
622 if (mSmmMemoryAttribute != NULL) {\r
623 mOnGuarding = TRUE;\r
624 mSmmMemoryAttribute->ClearMemoryAttributes (\r
625 mSmmMemoryAttribute,\r
626 BaseAddress,\r
627 EFI_PAGE_SIZE,\r
628 EFI_MEMORY_RP\r
629 );\r
630 mOnGuarding = FALSE;\r
631 }\r
632}\r
633\r
634/**\r
635 Check to see if the memory at the given address should be guarded or not.\r
636\r
637 @param[in] MemoryType Memory type to check.\r
638 @param[in] AllocateType Allocation type to check.\r
639 @param[in] PageOrPool Indicate a page allocation or pool allocation.\r
640\r
641\r
642 @return TRUE The given type of memory should be guarded.\r
643 @return FALSE The given type of memory should not be guarded.\r
644**/\r
645BOOLEAN\r
646IsMemoryTypeToGuard (\r
647 IN EFI_MEMORY_TYPE MemoryType,\r
648 IN EFI_ALLOCATE_TYPE AllocateType,\r
649 IN UINT8 PageOrPool\r
650 )\r
651{\r
652 UINT64 TestBit;\r
653 UINT64 ConfigBit;\r
654\r
655 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0\r
656 || mOnGuarding\r
657 || AllocateType == AllocateAddress) {\r
658 return FALSE;\r
659 }\r
660\r
661 ConfigBit = 0;\r
662 if ((PageOrPool & GUARD_HEAP_TYPE_POOL) != 0) {\r
663 ConfigBit |= PcdGet64 (PcdHeapGuardPoolType);\r
664 }\r
665\r
666 if ((PageOrPool & GUARD_HEAP_TYPE_PAGE) != 0) {\r
667 ConfigBit |= PcdGet64 (PcdHeapGuardPageType);\r
668 }\r
669\r
670 if (MemoryType == EfiRuntimeServicesData ||\r
671 MemoryType == EfiRuntimeServicesCode) {\r
672 TestBit = LShiftU64 (1, MemoryType);\r
673 } else if (MemoryType == EfiMaxMemoryType) {\r
674 TestBit = (UINT64)-1;\r
675 } else {\r
676 TestBit = 0;\r
677 }\r
678\r
679 return ((ConfigBit & TestBit) != 0);\r
680}\r
681\r
682/**\r
683 Check to see if the pool at the given address should be guarded or not.\r
684\r
685 @param[in] MemoryType Pool type to check.\r
686\r
687\r
688 @return TRUE The given type of pool should be guarded.\r
689 @return FALSE The given type of pool should not be guarded.\r
690**/\r
691BOOLEAN\r
692IsPoolTypeToGuard (\r
693 IN EFI_MEMORY_TYPE MemoryType\r
694 )\r
695{\r
696 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,\r
697 GUARD_HEAP_TYPE_POOL);\r
698}\r
699\r
700/**\r
701 Check to see if the page at the given address should be guarded or not.\r
702\r
703 @param[in] MemoryType Page type to check.\r
704 @param[in] AllocateType Allocation type to check.\r
705\r
706 @return TRUE The given type of page should be guarded.\r
707 @return FALSE The given type of page should not be guarded.\r
708**/\r
709BOOLEAN\r
710IsPageTypeToGuard (\r
711 IN EFI_MEMORY_TYPE MemoryType,\r
712 IN EFI_ALLOCATE_TYPE AllocateType\r
713 )\r
714{\r
715 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);\r
716}\r
717\r
718/**\r
719 Check to see if the heap guard is enabled for page and/or pool allocation.\r
720\r
721 @return TRUE/FALSE.\r
722**/\r
723BOOLEAN\r
724IsHeapGuardEnabled (\r
725 VOID\r
726 )\r
727{\r
728 return IsMemoryTypeToGuard (EfiMaxMemoryType, AllocateAnyPages,\r
729 GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_PAGE);\r
730}\r
731\r
732/**\r
733 Set head Guard and tail Guard for the given memory range.\r
734\r
735 @param[in] Memory Base address of memory to set guard for.\r
736 @param[in] NumberOfPages Memory size in pages.\r
737\r
738 @return VOID.\r
739**/\r
740VOID\r
741SetGuardForMemory (\r
742 IN EFI_PHYSICAL_ADDRESS Memory,\r
743 IN UINTN NumberOfPages\r
744 )\r
745{\r
746 EFI_PHYSICAL_ADDRESS GuardPage;\r
747\r
748 //\r
749 // Set tail Guard\r
750 //\r
751 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
752 if (!IsGuardPage (GuardPage)) {\r
753 SetGuardPage (GuardPage);\r
754 }\r
755\r
756 // Set head Guard\r
757 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
758 if (!IsGuardPage (GuardPage)) {\r
759 SetGuardPage (GuardPage);\r
760 }\r
761\r
762 //\r
763 // Mark the memory range as Guarded\r
764 //\r
765 SetGuardedMemoryBits (Memory, NumberOfPages);\r
766}\r
767\r
/**
  Unset head Guard and tail Guard for the given memory range.

  Guard pages may be shared with adjacent allocations, and the range being
  freed may be only part of a guarded block; both cases are detected from
  the bitmap and the Guards are kept or re-created accordingly.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  GuardPage;
  UINT64                GuardBitmap;

  if (NumberOfPages == 0) {
    return;
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  GuardPage = Memory - EFI_PAGES_TO_SIZE (1);
  GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // unset it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages before memory to free are still in Guard. It's a partial free
    // case. Turn first page of memory block to free into a new Guard.
    //
    SetGuardPage (Memory);
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);
  GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      UnsetGuardPage (GuardPage);
    }
  } else {
    //
    // Pages after memory to free are still in Guard. It's a partial free
    // case. We need to keep one page to be a head Guard.
    //
    SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));
  }

  //
  // No matter what, we just clear the mark of the Guarded memory.
  //
  ClearGuardedMemoryBits(Memory, NumberOfPages);
}
860\r
/**
  Adjust address of free memory according to existing and/or required Guard.

  This function will check if there're existing Guard pages of adjacent
  memory blocks, and try to use it as the Guard page of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if no enough space for the required size of memory and its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  )
{
  UINT64  Target;

  //
  // UEFI spec requires that allocated pool must be 8-byte aligned. If it's
  // indicated to put the pool near the Tail Guard, we need extra bytes to
  // make sure alignment of the returned pool address.
  //
  if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) == 0) {
    SizeRequested = ALIGN_VALUE(SizeRequested, 8);
  }

  //
  // Allocate from the top of the free block (closest to the tail Guard).
  //
  Target = Start + Size - SizeRequested;
  ASSERT (Target >= Start);
  if (Target == 0) {
    return 0;
  }

  if (!IsGuardPage (Start + Size)) {
    // No Guard at tail to share. One more page is needed.
    Target -= EFI_PAGES_TO_SIZE (1);
  }

  // Out of range?
  if (Target < Start) {
    return 0;
  }

  // At the edge?
  if (Target == Start) {
    if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {
      // No enough space for a new head Guard if no Guard at head to share.
      return 0;
    }
  }

  // OK, we have enough pages for memory and its Guards. Return the End of the
  // free space.
  return Target + SizeRequested - 1;
}
921\r
/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with adjacent
  memory block if it's still in guard, or free it if no more sharing. Another
  is to reserve pages as Guard pages in partial page free situation.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Start;
  EFI_PHYSICAL_ADDRESS  MemoryToTest;
  UINTN                 PagesToFree;
  UINT64                GuardBitmap;
  UINT64                Attributes;

  if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {
    return;
  }

  Start = *Memory;
  PagesToFree = *NumberOfPages;

  //
  // In case the memory to free is marked as read-only (e.g.
  // EfiRuntimeServicesCode): clear EFI_MEMORY_RO first, otherwise the
  // freed pages cannot be written (e.g. when re-linked into free lists),
  // which would trigger a page fault (#PF).
  //
  if (mSmmMemoryAttribute != NULL) {
    Attributes = 0;
    mSmmMemoryAttribute->GetMemoryAttributes (
                           mSmmMemoryAttribute,
                           Start,
                           EFI_PAGES_TO_SIZE (PagesToFree),
                           &Attributes
                           );
    if ((Attributes & EFI_MEMORY_RO) != 0) {
      mSmmMemoryAttribute->ClearMemoryAttributes (
                             mSmmMemoryAttribute,
                             Start,
                             EFI_PAGES_TO_SIZE (PagesToFree),
                             EFI_MEMORY_RO
                             );
    }
  }

  //
  // Head Guard must be one page before, if any.
  //
  //          MSB-> 1     0 <-LSB
  //          -------------------
  //  Head Guard -> 0     1 -> Don't free Head Guard  (shared Guard)
  //  Head Guard -> 0     0 -> Free Head Guard either (not shared Guard)
  //                1     X -> Don't free first page  (need a new Guard)
  //                           (it'll be turned into a Guard page later)
  //          -------------------
  //      Start -> -1    -2
  //
  MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT1) == 0) {
    //
    // Head Guard exists.
    //
    if ((GuardBitmap & BIT0) == 0) {
      //
      // If the head Guard is not a tail Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      Start       -= EFI_PAGES_TO_SIZE (1);
      PagesToFree += 1;
    }
  } else {
    //
    // No Head Guard, and pages before memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a tail Guard.
    //
    Start       += EFI_PAGES_TO_SIZE (1);
    PagesToFree -= 1;
  }

  //
  // Tail Guard must be the page after this memory block to free, if any.
  //
  //   MSB-> 1     0 <-LSB
  //  --------------------
  //         1     0 <- Tail Guard -> Don't free Tail Guard  (shared Guard)
  //         0     0 <- Tail Guard -> Free Tail Guard either (not shared Guard)
  //         X     1               -> Don't free last page   (need a new Guard)
  //                                 (it'll be turned into a Guard page later)
  //  --------------------
  //        +1    +0 <- End
  //
  MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);
  GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);
  if ((GuardBitmap & BIT0) == 0) {
    //
    // Tail Guard exists.
    //
    if ((GuardBitmap & BIT1) == 0) {
      //
      // If the tail Guard is not a head Guard of adjacent memory block,
      // free it; otherwise, keep it.
      //
      PagesToFree += 1;
    }
  } else if (PagesToFree > 0) {
    //
    // No Tail Guard, and pages after memory to free are still in Guard. It's a
    // partial free case. We need to keep one page to be a head Guard.
    //
    PagesToFree -= 1;
  }

  *Memory        = Start;
  *NumberOfPages = PagesToFree;
}
1045\r
1046/**\r
1047 Adjust the base and number of pages to really allocate according to Guard.\r
1048\r
1049 @param[in,out] Memory Base address of free memory.\r
1050 @param[in,out] NumberOfPages Size of memory to allocate.\r
1051\r
1052 @return VOID.\r
1053**/\r
1054VOID\r
1055AdjustMemoryA (\r
1056 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
1057 IN OUT UINTN *NumberOfPages\r
1058 )\r
1059{\r
1060 //\r
1061 // FindFreePages() has already taken the Guard into account. It's safe to\r
1062 // adjust the start address and/or number of pages here, to make sure that\r
1063 // the Guards are also "allocated".\r
1064 //\r
1065 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {\r
1066 // No tail Guard, add one.\r
1067 *NumberOfPages += 1;\r
1068 }\r
1069\r
1070 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {\r
1071 // No head Guard, add one.\r
1072 *Memory -= EFI_PAGE_SIZE;\r
1073 *NumberOfPages += 1;\r
1074 }\r
1075}\r
1076\r
1077/**\r
1078 Adjust the pool head position to make sure the Guard page is adjavent to\r
1079 pool tail or pool head.\r
1080\r
1081 @param[in] Memory Base address of memory allocated.\r
1082 @param[in] NoPages Number of pages actually allocated.\r
1083 @param[in] Size Size of memory requested.\r
1084 (plus pool head/tail overhead)\r
1085\r
1086 @return Address of pool head\r
1087**/\r
1088VOID *\r
1089AdjustPoolHeadA (\r
1090 IN EFI_PHYSICAL_ADDRESS Memory,\r
1091 IN UINTN NoPages,\r
1092 IN UINTN Size\r
1093 )\r
1094{\r
c44218e5 1095 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1096 //\r
1097 // Pool head is put near the head Guard\r
1098 //\r
1099 return (VOID *)(UINTN)Memory;\r
1100 }\r
1101\r
1102 //\r
1103 // Pool head is put near the tail Guard\r
1104 //\r
c44218e5 1105 Size = ALIGN_VALUE (Size, 8);\r
e63da9f0
JW
1106 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);\r
1107}\r
1108\r
1109/**\r
1110 Get the page base address according to pool head address.\r
1111\r
1112 @param[in] Memory Head address of pool to free.\r
1113\r
1114 @return Address of pool head.\r
1115**/\r
1116VOID *\r
1117AdjustPoolHeadF (\r
1118 IN EFI_PHYSICAL_ADDRESS Memory\r
1119 )\r
1120{\r
c44218e5 1121 if (Memory == 0 || (PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
e63da9f0
JW
1122 //\r
1123 // Pool head is put near the head Guard\r
1124 //\r
1125 return (VOID *)(UINTN)Memory;\r
1126 }\r
1127\r
1128 //\r
1129 // Pool head is put near the tail Guard\r
1130 //\r
1131 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);\r
1132}\r
1133\r
1134/**\r
1135 Helper function of memory allocation with Guard pages.\r
1136\r
1137 @param FreePageList The free page node.\r
1138 @param NumberOfPages Number of pages to be allocated.\r
1139 @param MaxAddress Request to allocate memory below this address.\r
1140 @param MemoryType Type of memory requested.\r
1141\r
1142 @return Memory address of allocated pages.\r
1143**/\r
1144UINTN\r
1145InternalAllocMaxAddressWithGuard (\r
1146 IN OUT LIST_ENTRY *FreePageList,\r
1147 IN UINTN NumberOfPages,\r
1148 IN UINTN MaxAddress,\r
1149 IN EFI_MEMORY_TYPE MemoryType\r
1150\r
1151 )\r
1152{\r
1153 LIST_ENTRY *Node;\r
1154 FREE_PAGE_LIST *Pages;\r
1155 UINTN PagesToAlloc;\r
1156 UINTN HeadGuard;\r
1157 UINTN TailGuard;\r
1158 UINTN Address;\r
1159\r
1160 for (Node = FreePageList->BackLink; Node != FreePageList;\r
1161 Node = Node->BackLink) {\r
1162 Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);\r
1163 if (Pages->NumberOfPages >= NumberOfPages &&\r
1164 (UINTN)Pages + EFI_PAGES_TO_SIZE (NumberOfPages) - 1 <= MaxAddress) {\r
1165\r
1166 //\r
1167 // We may need 1 or 2 more pages for Guard. Check it out.\r
1168 //\r
1169 PagesToAlloc = NumberOfPages;\r
1170 TailGuard = (UINTN)Pages + EFI_PAGES_TO_SIZE (Pages->NumberOfPages);\r
1171 if (!IsGuardPage (TailGuard)) {\r
1172 //\r
1173 // Add one if no Guard at the end of current free memory block.\r
1174 //\r
1175 PagesToAlloc += 1;\r
1176 TailGuard = 0;\r
1177 }\r
1178\r
1179 HeadGuard = (UINTN)Pages +\r
1180 EFI_PAGES_TO_SIZE (Pages->NumberOfPages - PagesToAlloc) -\r
1181 EFI_PAGE_SIZE;\r
1182 if (!IsGuardPage (HeadGuard)) {\r
1183 //\r
1184 // Add one if no Guard at the page before the address to allocate\r
1185 //\r
1186 PagesToAlloc += 1;\r
1187 HeadGuard = 0;\r
1188 }\r
1189\r
1190 if (Pages->NumberOfPages < PagesToAlloc) {\r
1191 // Not enough space to allocate memory with Guards? Try next block.\r
1192 continue;\r
1193 }\r
1194\r
1195 Address = InternalAllocPagesOnOneNode (Pages, PagesToAlloc, MaxAddress);\r
1196 ConvertSmmMemoryMapEntry(MemoryType, Address, PagesToAlloc, FALSE);\r
1197 CoreFreeMemoryMapStack();\r
1198 if (HeadGuard == 0) {\r
1199 // Don't pass the Guard page to user.\r
1200 Address += EFI_PAGE_SIZE;\r
1201 }\r
1202 SetGuardForMemory (Address, NumberOfPages);\r
1203 return Address;\r
1204 }\r
1205 }\r
1206\r
1207 return (UINTN)(-1);\r
1208}\r
1209\r
1210/**\r
1211 Helper function of memory free with Guard pages.\r
1212\r
1213 @param[in] Memory Base address of memory being freed.\r
1214 @param[in] NumberOfPages The number of pages to free.\r
1215 @param[in] AddRegion If this memory is new added region.\r
1216\r
1217 @retval EFI_NOT_FOUND Could not find the entry that covers the range.\r
1218 @retval EFI_INVALID_PARAMETER Address not aligned, Address is zero or NumberOfPages is zero.\r
1219 @return EFI_SUCCESS Pages successfully freed.\r
1220**/\r
1221EFI_STATUS\r
1222SmmInternalFreePagesExWithGuard (\r
1223 IN EFI_PHYSICAL_ADDRESS Memory,\r
1224 IN UINTN NumberOfPages,\r
1225 IN BOOLEAN AddRegion\r
1226 )\r
1227{\r
1228 EFI_PHYSICAL_ADDRESS MemoryToFree;\r
1229 UINTN PagesToFree;\r
1230\r
7823611c
JW
1231 if (((Memory & EFI_PAGE_MASK) != 0) || (Memory == 0) || (NumberOfPages == 0)) {\r
1232 return EFI_INVALID_PARAMETER;\r
1233 }\r
1234\r
e63da9f0
JW
1235 MemoryToFree = Memory;\r
1236 PagesToFree = NumberOfPages;\r
1237\r
1238 AdjustMemoryF (&MemoryToFree, &PagesToFree);\r
1239 UnsetGuardForMemory (Memory, NumberOfPages);\r
38d870fc
JW
1240 if (PagesToFree == 0) {\r
1241 return EFI_SUCCESS;\r
1242 }\r
e63da9f0
JW
1243\r
1244 return SmmInternalFreePagesEx (MemoryToFree, PagesToFree, AddRegion);\r
1245}\r
1246\r
1247/**\r
1248 Set all Guard pages which cannot be set during the non-SMM mode time.\r
1249**/\r
1250VOID\r
1251SetAllGuardPages (\r
1252 VOID\r
1253 )\r
1254{\r
1255 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1256 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1257 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1258 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1259 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1260 UINT64 TableEntry;\r
1261 UINT64 Address;\r
1262 UINT64 GuardPage;\r
1263 INTN Level;\r
1264 UINTN Index;\r
1265 BOOLEAN OnGuarding;\r
1266\r
c6c50165
JW
1267 if (mGuardedMemoryMap == 0 ||\r
1268 mMapLevel == 0 ||\r
1269 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {\r
e63da9f0
JW
1270 return;\r
1271 }\r
1272\r
1273 CopyMem (Entries, mLevelMask, sizeof (Entries));\r
1274 CopyMem (Shifts, mLevelShift, sizeof (Shifts));\r
1275\r
1276 SetMem (Tables, sizeof(Tables), 0);\r
1277 SetMem (Addresses, sizeof(Addresses), 0);\r
1278 SetMem (Indices, sizeof(Indices), 0);\r
1279\r
1280 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
1281 Tables[Level] = mGuardedMemoryMap;\r
1282 Address = 0;\r
1283 OnGuarding = FALSE;\r
1284\r
1285 DEBUG_CODE (\r
1286 DumpGuardedMemoryBitmap ();\r
1287 );\r
1288\r
1289 while (TRUE) {\r
1290 if (Indices[Level] > Entries[Level]) {\r
1291 Tables[Level] = 0;\r
1292 Level -= 1;\r
1293 } else {\r
1294\r
1295 TableEntry = ((UINT64 *)(UINTN)(Tables[Level]))[Indices[Level]];\r
1296 Address = Addresses[Level];\r
1297\r
1298 if (TableEntry == 0) {\r
1299\r
1300 OnGuarding = FALSE;\r
1301\r
1302 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1303\r
1304 Level += 1;\r
1305 Tables[Level] = TableEntry;\r
1306 Addresses[Level] = Address;\r
1307 Indices[Level] = 0;\r
1308\r
1309 continue;\r
1310\r
1311 } else {\r
1312\r
1313 Index = 0;\r
1314 while (Index < GUARDED_HEAP_MAP_ENTRY_BITS) {\r
1315 if ((TableEntry & 1) == 1) {\r
1316 if (OnGuarding) {\r
1317 GuardPage = 0;\r
1318 } else {\r
1319 GuardPage = Address - EFI_PAGE_SIZE;\r
1320 }\r
1321 OnGuarding = TRUE;\r
1322 } else {\r
1323 if (OnGuarding) {\r
1324 GuardPage = Address;\r
1325 } else {\r
1326 GuardPage = 0;\r
1327 }\r
1328 OnGuarding = FALSE;\r
1329 }\r
1330\r
1331 if (GuardPage != 0) {\r
1332 SetGuardPage (GuardPage);\r
1333 }\r
1334\r
1335 if (TableEntry == 0) {\r
1336 break;\r
1337 }\r
1338\r
1339 TableEntry = RShiftU64 (TableEntry, 1);\r
1340 Address += EFI_PAGE_SIZE;\r
1341 Index += 1;\r
1342 }\r
1343 }\r
1344 }\r
1345\r
1346 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {\r
1347 break;\r
1348 }\r
1349\r
1350 Indices[Level] += 1;\r
1351 Address = (Level == 0) ? 0 : Addresses[Level - 1];\r
1352 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);\r
1353\r
1354 }\r
1355}\r
1356\r
1357/**\r
1358 Hook function used to set all Guard pages after entering SMM mode.\r
1359**/\r
1360VOID\r
1361SmmEntryPointMemoryManagementHook (\r
1362 VOID\r
1363 )\r
1364{\r
1365 EFI_STATUS Status;\r
1366\r
1367 if (mSmmMemoryAttribute == NULL) {\r
1368 Status = SmmLocateProtocol (\r
1369 &gEdkiiSmmMemoryAttributeProtocolGuid,\r
1370 NULL,\r
1371 (VOID **)&mSmmMemoryAttribute\r
1372 );\r
1373 if (!EFI_ERROR(Status)) {\r
1374 SetAllGuardPages ();\r
1375 }\r
1376 }\r
1377}\r
1378\r
1379/**\r
1380 Helper function to convert a UINT64 value in binary to a string.\r
1381\r
1382 @param[in] Value Value of a UINT64 integer.\r
1383 @param[out] BinString String buffer to contain the conversion result.\r
1384\r
1385 @return VOID.\r
1386**/\r
1387VOID\r
1388Uint64ToBinString (\r
1389 IN UINT64 Value,\r
1390 OUT CHAR8 *BinString\r
1391 )\r
1392{\r
1393 UINTN Index;\r
1394\r
1395 if (BinString == NULL) {\r
1396 return;\r
1397 }\r
1398\r
1399 for (Index = 64; Index > 0; --Index) {\r
1400 BinString[Index - 1] = '0' + (Value & 1);\r
1401 Value = RShiftU64 (Value, 1);\r
1402 }\r
1403 BinString[64] = '\0';\r
1404}\r
1405\r
1406/**\r
1407 Dump the guarded memory bit map.\r
1408**/\r
1409VOID\r
1410EFIAPI\r
1411DumpGuardedMemoryBitmap (\r
1412 VOID\r
1413 )\r
1414{\r
1415 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1416 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1417 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1418 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1419 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1420 UINT64 TableEntry;\r
1421 UINT64 Address;\r
1422 INTN Level;\r
1423 UINTN RepeatZero;\r
1424 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];\r
1425 CHAR8 *Ruler1;\r
1426 CHAR8 *Ruler2;\r
1427\r
c6c50165
JW
1428 if (mGuardedMemoryMap == 0 ||\r
1429 mMapLevel == 0 ||\r
1430 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {\r
e63da9f0
JW
1431 return;\r
1432 }\r
1433\r
1434 Ruler1 = " 3 2 1 0";\r
1435 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";\r
1436\r
1437 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="\r
1438 " Guarded Memory Bitmap "\r
1439 "==============================\r\n"));\r
1440 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));\r
1441 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));\r
1442\r
1443 CopyMem (Entries, mLevelMask, sizeof (Entries));\r
1444 CopyMem (Shifts, mLevelShift, sizeof (Shifts));\r
1445\r
1446 SetMem (Indices, sizeof(Indices), 0);\r
1447 SetMem (Tables, sizeof(Tables), 0);\r
1448 SetMem (Addresses, sizeof(Addresses), 0);\r
1449\r
1450 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
1451 Tables[Level] = mGuardedMemoryMap;\r
1452 Address = 0;\r
1453 RepeatZero = 0;\r
1454\r
1455 while (TRUE) {\r
1456 if (Indices[Level] > Entries[Level]) {\r
1457\r
1458 Tables[Level] = 0;\r
1459 Level -= 1;\r
1460 RepeatZero = 0;\r
1461\r
1462 DEBUG ((\r
1463 HEAP_GUARD_DEBUG_LEVEL,\r
1464 "========================================="\r
1465 "=========================================\r\n"\r
1466 ));\r
1467\r
1468 } else {\r
1469\r
1470 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];\r
1471 Address = Addresses[Level];\r
1472\r
1473 if (TableEntry == 0) {\r
1474\r
1475 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1476 if (RepeatZero == 0) {\r
1477 Uint64ToBinString(TableEntry, String);\r
1478 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));\r
1479 } else if (RepeatZero == 1) {\r
1480 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));\r
1481 }\r
1482 RepeatZero += 1;\r
1483 }\r
1484\r
1485 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1486\r
1487 Level += 1;\r
1488 Tables[Level] = TableEntry;\r
1489 Addresses[Level] = Address;\r
1490 Indices[Level] = 0;\r
1491 RepeatZero = 0;\r
1492\r
1493 continue;\r
1494\r
1495 } else {\r
1496\r
1497 RepeatZero = 0;\r
1498 Uint64ToBinString(TableEntry, String);\r
1499 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));\r
1500\r
1501 }\r
1502 }\r
1503\r
1504 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {\r
1505 break;\r
1506 }\r
1507\r
1508 Indices[Level] += 1;\r
1509 Address = (Level == 0) ? 0 : Addresses[Level - 1];\r
1510 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);\r
1511\r
1512 }\r
1513}\r
1514\r
1515/**\r
1516 Debug function used to verify if the Guard page is well set or not.\r
1517\r
1518 @param[in] BaseAddress Address of memory to check.\r
1519 @param[in] NumberOfPages Size of memory in pages.\r
1520\r
1521 @return TRUE The head Guard and tail Guard are both well set.\r
1522 @return FALSE The head Guard and/or tail Guard are not well set.\r
1523**/\r
1524BOOLEAN\r
1525VerifyMemoryGuard (\r
1526 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
1527 IN UINTN NumberOfPages\r
1528 )\r
1529{\r
1530 EFI_STATUS Status;\r
1531 UINT64 Attribute;\r
1532 EFI_PHYSICAL_ADDRESS Address;\r
1533\r
1534 if (mSmmMemoryAttribute == NULL) {\r
1535 return TRUE;\r
1536 }\r
1537\r
1538 Attribute = 0;\r
1539 Address = BaseAddress - EFI_PAGE_SIZE;\r
1540 Status = mSmmMemoryAttribute->GetMemoryAttributes (\r
1541 mSmmMemoryAttribute,\r
1542 Address,\r
1543 EFI_PAGE_SIZE,\r
1544 &Attribute\r
1545 );\r
1546 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {\r
1547 DEBUG ((DEBUG_ERROR, "Head Guard is not set at: %016lx (%016lX)!!!\r\n",\r
1548 Address, Attribute));\r
1549 DumpGuardedMemoryBitmap ();\r
1550 return FALSE;\r
1551 }\r
1552\r
1553 Attribute = 0;\r
1554 Address = BaseAddress + EFI_PAGES_TO_SIZE (NumberOfPages);\r
1555 Status = mSmmMemoryAttribute->GetMemoryAttributes (\r
1556 mSmmMemoryAttribute,\r
1557 Address,\r
1558 EFI_PAGE_SIZE,\r
1559 &Attribute\r
1560 );\r
1561 if (EFI_ERROR (Status) || (Attribute & EFI_MEMORY_RP) == 0) {\r
1562 DEBUG ((DEBUG_ERROR, "Tail Guard is not set at: %016lx (%016lX)!!!\r\n",\r
1563 Address, Attribute));\r
1564 DumpGuardedMemoryBitmap ();\r
1565 return FALSE;\r
1566 }\r
1567\r
1568 return TRUE;\r
1569}\r
1570\r