1/** @file\r
2 UEFI Heap Guard functions.\r
3\r
4Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "DxeMain.h"\r
16#include "Imem.h"\r
17#include "HeapGuard.h"\r
18\r
19//\r
20 // Global to avoid infinite re-entry into memory allocation when updating\r
21 // page table attributes, which may need to allocate pages for new PDE/PTE.\r
22//\r
23GLOBAL_REMOVE_IF_UNREFERENCED BOOLEAN mOnGuarding = FALSE;\r
24\r
25//\r
26 // Pointer to the table tracking Guarded memory with a bitmap, in which '1'\r
27 // indicates guarded memory. '0' might be free memory or a Guard page\r
28 // itself, depending on the status of the memory adjacent to it.\r
29//\r
30GLOBAL_REMOVE_IF_UNREFERENCED UINT64 mGuardedMemoryMap = 0;\r
31\r
32//\r
33// Current depth level of map table pointed by mGuardedMemoryMap.\r
34 // mMapLevel must be initialized to at least 1. It will be automatically\r
35// updated according to the address of memory just tracked.\r
36//\r
37GLOBAL_REMOVE_IF_UNREFERENCED UINTN mMapLevel = 1;\r
38\r
39//\r
40// Shift and mask for each level of map table\r
41//\r
42GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
43 = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;\r
44GLOBAL_REMOVE_IF_UNREFERENCED UINTN mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH]\r
45 = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;\r
46\r
47/**\r
48 Set corresponding bits in bitmap table to 1 according to the address.\r
49\r
50 @param[in] Address Start address to set for.\r
51 @param[in] BitNumber Number of bits to set.\r
52 @param[in] BitMap Pointer to bitmap which covers the Address.\r
53\r
54 @return VOID.\r
55**/\r
56STATIC\r
57VOID\r
58SetBits (\r
59 IN EFI_PHYSICAL_ADDRESS Address,\r
60 IN UINTN BitNumber,\r
61 IN UINT64 *BitMap\r
62 )\r
63{\r
64 UINTN Lsbs;\r
65 UINTN Qwords;\r
66 UINTN Msbs;\r
67 UINTN StartBit;\r
68 UINTN EndBit;\r
69\r
70 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);\r
71 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
72\r
73 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {\r
74 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %\r
75 GUARDED_HEAP_MAP_ENTRY_BITS;\r
76 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
77 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;\r
78 } else {\r
79 Msbs = BitNumber;\r
80 Lsbs = 0;\r
81 Qwords = 0;\r
82 }\r
83\r
84 if (Msbs > 0) {\r
85 *BitMap |= LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);\r
86 BitMap += 1;\r
87 }\r
88\r
89 if (Qwords > 0) {\r
90 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES,\r
91 (UINT64)-1);\r
92 BitMap += Qwords;\r
93 }\r
94\r
95 if (Lsbs > 0) {\r
96 *BitMap |= (LShiftU64 (1, Lsbs) - 1);\r
97 }\r
98}\r
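//\r
// Illustrative example (not part of the build): with GUARDED_HEAP_MAP_ENTRY_BITS\r
// being 64, a call such as\r
//\r
//   SetBits (Address, 70, BitMap);    // Address maps to bit 60 of *BitMap\r
//\r
// splits into Msbs = 4 (bits 60..63 of the first word), Qwords = 1 (one full\r
// word written via SetMem64) and Lsbs = 2 (bits 0..1 of the last word), which\r
// together cover all 70 bits.\r
//\r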
99\r
100/**\r
101 Set corresponding bits in bitmap table to 0 according to the address.\r
102\r
103 @param[in] Address Start address to clear for.\r
104 @param[in] BitNumber Number of bits to clear.\r
105 @param[in] BitMap Pointer to bitmap which covers the Address.\r
106\r
107 @return VOID.\r
108**/\r
109STATIC\r
110VOID\r
111ClearBits (\r
112 IN EFI_PHYSICAL_ADDRESS Address,\r
113 IN UINTN BitNumber,\r
114 IN UINT64 *BitMap\r
115 )\r
116{\r
117 UINTN Lsbs;\r
118 UINTN Qwords;\r
119 UINTN Msbs;\r
120 UINTN StartBit;\r
121 UINTN EndBit;\r
122\r
123 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);\r
124 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
125\r
126 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {\r
127 Msbs = (GUARDED_HEAP_MAP_ENTRY_BITS - StartBit) %\r
128 GUARDED_HEAP_MAP_ENTRY_BITS;\r
129 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
130 Qwords = (BitNumber - Msbs) / GUARDED_HEAP_MAP_ENTRY_BITS;\r
131 } else {\r
132 Msbs = BitNumber;\r
133 Lsbs = 0;\r
134 Qwords = 0;\r
135 }\r
136\r
137 if (Msbs > 0) {\r
138 *BitMap &= ~LShiftU64 (LShiftU64 (1, Msbs) - 1, StartBit);\r
139 BitMap += 1;\r
140 }\r
141\r
142 if (Qwords > 0) {\r
143 SetMem64 ((VOID *)BitMap, Qwords * GUARDED_HEAP_MAP_ENTRY_BYTES, 0);\r
144 BitMap += Qwords;\r
145 }\r
146\r
147 if (Lsbs > 0) {\r
148 *BitMap &= ~(LShiftU64 (1, Lsbs) - 1);\r
149 }\r
150}\r
151\r
152/**\r
153 Get corresponding bits in bitmap table according to the address.\r
154\r
155 The value of bit 0 corresponds to the status of memory at given Address.\r
156 No more than 64 bits can be retrieved in one call.\r
157\r
158 @param[in] Address Start address to retrieve bits for.\r
159 @param[in] BitNumber Number of bits to get.\r
160 @param[in] BitMap Pointer to bitmap which covers the Address.\r
161\r
162 @return An integer containing the bits information.\r
163**/\r
164STATIC\r
165UINT64\r
166GetBits (\r
167 IN EFI_PHYSICAL_ADDRESS Address,\r
168 IN UINTN BitNumber,\r
169 IN UINT64 *BitMap\r
170 )\r
171{\r
172 UINTN StartBit;\r
173 UINTN EndBit;\r
174 UINTN Lsbs;\r
175 UINTN Msbs;\r
176 UINT64 Result;\r
177\r
178 ASSERT (BitNumber <= GUARDED_HEAP_MAP_ENTRY_BITS);\r
179\r
180 StartBit = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);\r
181 EndBit = (StartBit + BitNumber - 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
182\r
183 if ((StartBit + BitNumber) > GUARDED_HEAP_MAP_ENTRY_BITS) {\r
184 Msbs = GUARDED_HEAP_MAP_ENTRY_BITS - StartBit;\r
185 Lsbs = (EndBit + 1) % GUARDED_HEAP_MAP_ENTRY_BITS;\r
186 } else {\r
187 Msbs = BitNumber;\r
188 Lsbs = 0;\r
189 }\r
190\r
191 Result = RShiftU64 ((*BitMap), StartBit) & (LShiftU64 (1, Msbs) - 1);\r
192 if (Lsbs > 0) {\r
193 BitMap += 1;\r
194 Result |= LShiftU64 ((*BitMap) & (LShiftU64 (1, Lsbs) - 1), Msbs);\r
195 }\r
196\r
197 return Result;\r
198}\r
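//\r
// Illustrative example (not part of the build): if Address maps to bit 62 of\r
// *BitMap and BitNumber is 8, then Msbs = 2 and Lsbs = 6; the result combines\r
// bits 62..63 of the first word with bits 0..5 of the following word, the\r
// latter shifted left by 2.\r
//\r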
199\r
200/**\r
201 Locate the pointer of bitmap from the guarded memory bitmap tables, which\r
202 covers the given Address.\r
203\r
204 @param[in] Address Start address to search the bitmap for.\r
205 @param[in] AllocMapUnit Flag to indicate whether to allocate a new map unit if it is not found.\r
206 @param[out] BitMap Pointer to bitmap which covers the Address.\r
207\r
208 @return The bit number from given Address to the end of current map table.\r
209**/\r
210UINTN\r
211FindGuardedMemoryMap (\r
212 IN EFI_PHYSICAL_ADDRESS Address,\r
213 IN BOOLEAN AllocMapUnit,\r
214 OUT UINT64 **BitMap\r
215 )\r
216{\r
217 UINTN Level;\r
218 UINT64 *GuardMap;\r
219 UINT64 MapMemory;\r
220 UINTN Index;\r
221 UINTN Size;\r
222 UINTN BitsToUnitEnd;\r
223 EFI_STATUS Status;\r
224\r
225 //\r
226 // Adjust current map table depth according to the address to access\r
227 //\r
228 while (mMapLevel < GUARDED_HEAP_MAP_TABLE_DEPTH\r
229 &&\r
230 RShiftU64 (\r
231 Address,\r
232 mLevelShift[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1]\r
233 ) != 0) {\r
234\r
235 if (mGuardedMemoryMap != 0) {\r
236 Size = (mLevelMask[GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel - 1] + 1)\r
237 * GUARDED_HEAP_MAP_ENTRY_BYTES;\r
238 Status = CoreInternalAllocatePages (\r
239 AllocateAnyPages,\r
240 EfiBootServicesData,\r
241 EFI_SIZE_TO_PAGES (Size),\r
242 &MapMemory,\r
243 FALSE\r
244 );\r
245 ASSERT_EFI_ERROR (Status);\r
246 ASSERT (MapMemory != 0);\r
247\r
248 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);\r
249\r
250 *(UINT64 *)(UINTN)MapMemory = mGuardedMemoryMap;\r
251 mGuardedMemoryMap = MapMemory;\r
252 }\r
253\r
254 mMapLevel++;\r
255\r
256 }\r
257\r
258 GuardMap = &mGuardedMemoryMap;\r
259 for (Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
260 Level < GUARDED_HEAP_MAP_TABLE_DEPTH;\r
261 ++Level) {\r
262\r
263 if (*GuardMap == 0) {\r
264 if (!AllocMapUnit) {\r
265 GuardMap = NULL;\r
266 break;\r
267 }\r
268\r
269 Size = (mLevelMask[Level] + 1) * GUARDED_HEAP_MAP_ENTRY_BYTES;\r
270 Status = CoreInternalAllocatePages (\r
271 AllocateAnyPages,\r
272 EfiBootServicesData,\r
273 EFI_SIZE_TO_PAGES (Size),\r
274 &MapMemory,\r
275 FALSE\r
276 );\r
277 ASSERT_EFI_ERROR (Status);\r
278 ASSERT (MapMemory != 0);\r
279\r
280 SetMem ((VOID *)(UINTN)MapMemory, Size, 0);\r
281 *GuardMap = MapMemory;\r
282 }\r
283\r
284 Index = (UINTN)RShiftU64 (Address, mLevelShift[Level]);\r
285 Index &= mLevelMask[Level];\r
286 GuardMap = (UINT64 *)(UINTN)((*GuardMap) + Index * sizeof (UINT64));\r
287\r
288 }\r
289\r
290 BitsToUnitEnd = GUARDED_HEAP_MAP_BITS - GUARDED_HEAP_MAP_BIT_INDEX (Address);\r
291 *BitMap = GuardMap;\r
292\r
293 return BitsToUnitEnd;\r
294}\r
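//\r
// Usage sketch (illustrative, not part of the build; it mirrors the callers\r
// below):\r
//\r
//   BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
//   Bits          = MIN (NumberOfPages, BitsToUnitEnd);\r
//   SetBits (Address, Bits, BitMap);   // or ClearBits ()/GetBits ()\r
//\r
// The returned bit count limits how many page bits may be touched before the\r
// next map unit has to be looked up with an advanced Address.\r
//\r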
295\r
296/**\r
297 Set corresponding bits in bitmap table to 1 according to given memory range.\r
298\r
299 @param[in] Address Memory address to guard from.\r
300 @param[in] NumberOfPages Number of pages to guard.\r
301\r
302 @return VOID.\r
303**/\r
304VOID\r
305EFIAPI\r
306SetGuardedMemoryBits (\r
307 IN EFI_PHYSICAL_ADDRESS Address,\r
308 IN UINTN NumberOfPages\r
309 )\r
310{\r
311 UINT64 *BitMap;\r
312 UINTN Bits;\r
313 UINTN BitsToUnitEnd;\r
314\r
315 while (NumberOfPages > 0) {\r
316 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
317 ASSERT (BitMap != NULL);\r
318\r
319 if (NumberOfPages > BitsToUnitEnd) {\r
320 // Cross map unit\r
321 Bits = BitsToUnitEnd;\r
322 } else {\r
323 Bits = NumberOfPages;\r
324 }\r
325\r
326 SetBits (Address, Bits, BitMap);\r
327\r
328 NumberOfPages -= Bits;\r
329 Address += EFI_PAGES_TO_SIZE (Bits);\r
330 }\r
331}\r
332\r
333/**\r
334 Clear corresponding bits in bitmap table according to given memory range.\r
335\r
336 @param[in] Address Memory address to unset from.\r
337 @param[in] NumberOfPages Number of pages to unset guard.\r
338\r
339 @return VOID.\r
340**/\r
341VOID\r
342EFIAPI\r
343ClearGuardedMemoryBits (\r
344 IN EFI_PHYSICAL_ADDRESS Address,\r
345 IN UINTN NumberOfPages\r
346 )\r
347{\r
348 UINT64 *BitMap;\r
349 UINTN Bits;\r
350 UINTN BitsToUnitEnd;\r
351\r
352 while (NumberOfPages > 0) {\r
353 BitsToUnitEnd = FindGuardedMemoryMap (Address, TRUE, &BitMap);\r
354 ASSERT (BitMap != NULL);\r
355\r
356 if (NumberOfPages > BitsToUnitEnd) {\r
357 // Cross map unit\r
358 Bits = BitsToUnitEnd;\r
359 } else {\r
360 Bits = NumberOfPages;\r
361 }\r
362\r
363 ClearBits (Address, Bits, BitMap);\r
364\r
365 NumberOfPages -= Bits;\r
366 Address += EFI_PAGES_TO_SIZE (Bits);\r
367 }\r
368}\r
369\r
370/**\r
371 Retrieve corresponding bits in bitmap table according to given memory range.\r
372\r
373 @param[in] Address Memory address to retrieve from.\r
374 @param[in] NumberOfPages Number of pages to retrieve.\r
375\r
376 @return An integer containing the guarded memory bitmap.\r
377**/\r
378UINTN\r
379GetGuardedMemoryBits (\r
380 IN EFI_PHYSICAL_ADDRESS Address,\r
381 IN UINTN NumberOfPages\r
382 )\r
383{\r
384 UINT64 *BitMap;\r
385 UINTN Bits;\r
386 UINTN Result;\r
387 UINTN Shift;\r
388 UINTN BitsToUnitEnd;\r
389\r
390 ASSERT (NumberOfPages <= GUARDED_HEAP_MAP_ENTRY_BITS);\r
391\r
392 Result = 0;\r
393 Shift = 0;\r
394 while (NumberOfPages > 0) {\r
395 BitsToUnitEnd = FindGuardedMemoryMap (Address, FALSE, &BitMap);\r
396\r
397 if (NumberOfPages > BitsToUnitEnd) {\r
398 // Cross map unit\r
399 Bits = BitsToUnitEnd;\r
400 } else {\r
401 Bits = NumberOfPages;\r
402 }\r
403\r
404 if (BitMap != NULL) {\r
405 Result |= LShiftU64 (GetBits (Address, Bits, BitMap), Shift);\r
406 }\r
407\r
408 Shift += Bits;\r
409 NumberOfPages -= Bits;\r
410 Address += EFI_PAGES_TO_SIZE (Bits);\r
411 }\r
412\r
413 return Result;\r
414}\r
415\r
416/**\r
417 Get bit value in bitmap table for the given address.\r
418\r
419 @param[in] Address The address to retrieve for.\r
420\r
421 @return 1 or 0.\r
422**/\r
423UINTN\r
424EFIAPI\r
425GetGuardMapBit (\r
426 IN EFI_PHYSICAL_ADDRESS Address\r
427 )\r
428{\r
429 UINT64 *GuardMap;\r
430\r
431 FindGuardedMemoryMap (Address, FALSE, &GuardMap);\r
432 if (GuardMap != NULL) {\r
433 if (RShiftU64 (*GuardMap,\r
434 GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) {\r
435 return 1;\r
436 }\r
437 }\r
438\r
439 return 0;\r
440}\r
441\r
442/**\r
443 Set the bit in bitmap table for the given address.\r
444\r
445 @param[in] Address The address to set for.\r
446\r
447 @return VOID.\r
448**/\r
449VOID\r
450EFIAPI\r
451SetGuardMapBit (\r
452 IN EFI_PHYSICAL_ADDRESS Address\r
453 )\r
454{\r
455 UINT64 *GuardMap;\r
456 UINT64 BitMask;\r
457\r
458 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
459 if (GuardMap != NULL) {\r
460 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
461 *GuardMap |= BitMask;\r
462 }\r
463}\r
464\r
465/**\r
466 Clear the bit in bitmap table for the given address.\r
467\r
468 @param[in] Address The address to clear for.\r
469\r
470 @return VOID.\r
471**/\r
472VOID\r
473EFIAPI\r
474ClearGuardMapBit (\r
475 IN EFI_PHYSICAL_ADDRESS Address\r
476 )\r
477{\r
478 UINT64 *GuardMap;\r
479 UINT64 BitMask;\r
480\r
481 FindGuardedMemoryMap (Address, TRUE, &GuardMap);\r
482 if (GuardMap != NULL) {\r
483 BitMask = LShiftU64 (1, GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address));\r
484 *GuardMap &= ~BitMask;\r
485 }\r
486}\r
487\r
488/**\r
489 Check to see if the page at the given address is a Guard page or not.\r
490\r
491 @param[in] Address The address to check for.\r
492\r
493 @return TRUE The page at Address is a Guard page.\r
494 @return FALSE The page at Address is not a Guard page.\r
495**/\r
496BOOLEAN\r
497EFIAPI\r
498IsGuardPage (\r
499 IN EFI_PHYSICAL_ADDRESS Address\r
500 )\r
501{\r
502 UINTN BitMap;\r
503\r
504 //\r
505 // There must be at least one guarded page before and/or after given\r
506 // address if it's a Guard page. The bitmap pattern should be one of\r
507 // 001, 100 and 101\r
508 //\r
509 BitMap = GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 3);\r
510 return ((BitMap == BIT0) || (BitMap == BIT2) || (BitMap == (BIT2 | BIT0)));\r
511}\r
512\r
513/**\r
514 Check to see if the page at the given address is a head Guard page or not.\r
515\r
516 @param[in] Address The address to check for\r
517\r
518 @return TRUE The page at Address is a head Guard page\r
519 @return FALSE The page at Address is not a head Guard page\r
520**/\r
521BOOLEAN\r
522EFIAPI\r
523IsHeadGuard (\r
524 IN EFI_PHYSICAL_ADDRESS Address\r
525 )\r
526{\r
527 return (GetGuardedMemoryBits (Address, 2) == BIT1);\r
528}\r
529\r
530/**\r
531 Check to see if the page at the given address is a tail Guard page or not.\r
532\r
533 @param[in] Address The address to check for.\r
534\r
535 @return TRUE The page at Address is a tail Guard page.\r
536 @return FALSE The page at Address is not a tail Guard page.\r
537**/\r
538BOOLEAN\r
539EFIAPI\r
540IsTailGuard (\r
541 IN EFI_PHYSICAL_ADDRESS Address\r
542 )\r
543{\r
544 return (GetGuardedMemoryBits (Address - EFI_PAGE_SIZE, 2) == BIT0);\r
545}\r
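//\r
// Illustrative layout (not part of the build): only guarded data pages are\r
// marked with '1' in the bitmap, never the Guard pages themselves. For a\r
// two-page allocation the layout and bits look like:\r
//\r
//   [head Guard][data][data][tail Guard]\r
//        0        1     1        0\r
//\r
// IsHeadGuard() therefore requires the page itself to be unmarked and the\r
// page after it marked, while IsTailGuard() requires the page before it\r
// marked and the page itself unmarked.\r
//\r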
546\r
547/**\r
548 Check to see if the page at the given address is guarded or not.\r
549\r
550 @param[in] Address The address to check for.\r
551\r
552 @return TRUE The page at Address is guarded.\r
553 @return FALSE The page at Address is not guarded.\r
554**/\r
555BOOLEAN\r
556EFIAPI\r
557IsMemoryGuarded (\r
558 IN EFI_PHYSICAL_ADDRESS Address\r
559 )\r
560{\r
561 return (GetGuardMapBit (Address) == 1);\r
562}\r
563\r
564/**\r
565 Set the page at the given address to be a Guard page.\r
566\r
567 This is done by changing the page table attribute to be NOT PRESENT.\r
568\r
569 @param[in] BaseAddress Page address to Guard at\r
570\r
571 @return VOID\r
572**/\r
573VOID\r
574EFIAPI\r
575SetGuardPage (\r
576 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
577 )\r
578{\r
579 //\r
580 // Set the flag to make sure memory used for page table operations is\r
581 // allocated without a Guard; otherwise infinite recursion could occur.\r
582 //\r
583 mOnGuarding = TRUE;\r
584 //\r
585 // Note: This might overwrite other attributes needed by other features,\r
586 // such as memory protection (NX). Please make sure they are not enabled\r
587 // at the same time.\r
588 //\r
589 gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, EFI_MEMORY_RP);\r
590 mOnGuarding = FALSE;\r
591}\r
592\r
593/**\r
594 Unset the Guard page at the given address back to normal memory.\r
595\r
596 This is done by changing the page table attribute to be PRESENT.\r
597\r
598 @param[in] BaseAddress Base address of the Guard page to unset.\r
599\r
600 @return VOID.\r
601**/\r
602VOID\r
603EFIAPI\r
604UnsetGuardPage (\r
605 IN EFI_PHYSICAL_ADDRESS BaseAddress\r
606 )\r
607{\r
608 //\r
609 // Set the flag to make sure memory used for page table operations is\r
610 // allocated without a Guard; otherwise infinite recursion could occur.\r
611 //\r
612 mOnGuarding = TRUE;\r
613 //\r
614 // Note: This might overwrite other attributes needed by other features,\r
615 // such as memory protection (NX). Please make sure they are not enabled\r
616 // at the same time.\r
617 //\r
618 gCpu->SetMemoryAttributes (gCpu, BaseAddress, EFI_PAGE_SIZE, 0);\r
619 mOnGuarding = FALSE;\r
620}\r
621\r
622/**\r
623 Check to see if the memory at the given address should be guarded or not.\r
624\r
625 @param[in] MemoryType Memory type to check.\r
626 @param[in] AllocateType Allocation type to check.\r
627 @param[in] PageOrPool Indicate a page allocation or pool allocation.\r
628\r
629\r
630 @return TRUE The given type of memory should be guarded.\r
631 @return FALSE The given type of memory should not be guarded.\r
632**/\r
633BOOLEAN\r
634IsMemoryTypeToGuard (\r
635 IN EFI_MEMORY_TYPE MemoryType,\r
636 IN EFI_ALLOCATE_TYPE AllocateType,\r
637 IN UINT8 PageOrPool\r
638 )\r
639{\r
640 UINT64 TestBit;\r
641 UINT64 ConfigBit;\r
642 BOOLEAN InSmm;\r
643\r
644 if (gCpu == NULL || AllocateType == AllocateAddress) {\r
645 return FALSE;\r
646 }\r
647\r
648 InSmm = FALSE;\r
649 if (gSmmBase2 != NULL) {\r
650 gSmmBase2->InSmm (gSmmBase2, &InSmm);\r
651 }\r
652\r
653 if (InSmm) {\r
654 return FALSE;\r
655 }\r
656\r
657 if ((PcdGet8 (PcdHeapGuardPropertyMask) & PageOrPool) == 0) {\r
658 return FALSE;\r
659 }\r
660\r
661 if (PageOrPool == GUARD_HEAP_TYPE_POOL) {\r
662 ConfigBit = PcdGet64 (PcdHeapGuardPoolType);\r
663 } else if (PageOrPool == GUARD_HEAP_TYPE_PAGE) {\r
664 ConfigBit = PcdGet64 (PcdHeapGuardPageType);\r
665 } else {\r
666 ConfigBit = (UINT64)-1;\r
667 }\r
668\r
669 if ((UINT32)MemoryType >= MEMORY_TYPE_OS_RESERVED_MIN) {\r
670 TestBit = BIT63;\r
671 } else if ((UINT32) MemoryType >= MEMORY_TYPE_OEM_RESERVED_MIN) {\r
672 TestBit = BIT62;\r
673 } else if (MemoryType < EfiMaxMemoryType) {\r
674 TestBit = LShiftU64 (1, MemoryType);\r
675 } else if (MemoryType == EfiMaxMemoryType) {\r
676 TestBit = (UINT64)-1;\r
677 } else {\r
678 TestBit = 0;\r
679 }\r
680\r
681 return ((ConfigBit & TestBit) != 0);\r
682}\r
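//\r
// Configuration example (illustrative, not part of the build): per the logic\r
// above, guarding page allocations of EfiBootServicesData (memory type 4)\r
// requires the GUARD_HEAP_TYPE_PAGE bit in PcdHeapGuardPropertyMask and BIT4\r
// in PcdHeapGuardPageType; pool guarding is selected the same way through\r
// PcdHeapGuardPoolType.\r
//\r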
683\r
684/**\r
685 Check to see if the pool at the given address should be guarded or not.\r
686\r
687 @param[in] MemoryType Pool type to check.\r
688\r
689\r
690 @return TRUE The given type of pool should be guarded.\r
691 @return FALSE The given type of pool should not be guarded.\r
692**/\r
693BOOLEAN\r
694IsPoolTypeToGuard (\r
695 IN EFI_MEMORY_TYPE MemoryType\r
696 )\r
697{\r
698 return IsMemoryTypeToGuard (MemoryType, AllocateAnyPages,\r
699 GUARD_HEAP_TYPE_POOL);\r
700}\r
701\r
702/**\r
703 Check to see if the page at the given address should be guarded or not.\r
704\r
705 @param[in] MemoryType Page type to check.\r
706 @param[in] AllocateType Allocation type to check.\r
707\r
708 @return TRUE The given type of page should be guarded.\r
709 @return FALSE The given type of page should not be guarded.\r
710**/\r
711BOOLEAN\r
712IsPageTypeToGuard (\r
713 IN EFI_MEMORY_TYPE MemoryType,\r
714 IN EFI_ALLOCATE_TYPE AllocateType\r
715 )\r
716{\r
717 return IsMemoryTypeToGuard (MemoryType, AllocateType, GUARD_HEAP_TYPE_PAGE);\r
718}\r
719\r
720/**\r
721 Set head Guard and tail Guard for the given memory range.\r
722\r
723 @param[in] Memory Base address of memory to set guard for.\r
724 @param[in] NumberOfPages Memory size in pages.\r
725\r
726 @return VOID\r
727**/\r
728VOID\r
729SetGuardForMemory (\r
730 IN EFI_PHYSICAL_ADDRESS Memory,\r
731 IN UINTN NumberOfPages\r
732 )\r
733{\r
734 EFI_PHYSICAL_ADDRESS GuardPage;\r
735\r
736 //\r
737 // Set tail Guard\r
738 //\r
739 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
740 if (!IsGuardPage (GuardPage)) {\r
741 SetGuardPage (GuardPage);\r
742 }\r
743\r
744 // Set head Guard\r
745 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
746 if (!IsGuardPage (GuardPage)) {\r
747 SetGuardPage (GuardPage);\r
748 }\r
749\r
750 //\r
751 // Mark the memory range as Guarded\r
752 //\r
753 SetGuardedMemoryBits (Memory, NumberOfPages);\r
754}\r
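//\r
// Illustrative result (not part of the build): after SetGuardForMemory\r
// (Memory, 2), the page just below Memory and the page just above the two\r
// allocated pages are mapped as not-present Guard pages (unless they were\r
// Guards already), and only the two data pages get their bits set in the\r
// guarded-memory bitmap.\r
//\r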
755\r
756/**\r
757 Unset head Guard and tail Guard for the given memory range.\r
758\r
759 @param[in] Memory Base address of memory to unset guard for.\r
760 @param[in] NumberOfPages Memory size in pages.\r
761\r
762 @return VOID\r
763**/\r
764VOID\r
765UnsetGuardForMemory (\r
766 IN EFI_PHYSICAL_ADDRESS Memory,\r
767 IN UINTN NumberOfPages\r
768 )\r
769{\r
770 EFI_PHYSICAL_ADDRESS GuardPage;\r
771 UINT64 GuardBitmap;\r
772\r
773 if (NumberOfPages == 0) {\r
774 return;\r
775 }\r
776\r
777 //\r
778 // Head Guard must be one page before, if any.\r
779 //\r
780 // MSB-> 1 0 <-LSB\r
781 // -------------------\r
782 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)\r
783 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)\r
784 // 1 X -> Don't free first page (need a new Guard)\r
785 // (it'll be turned into a Guard page later)\r
786 // -------------------\r
787 // Start -> -1 -2\r
788 //\r
789 GuardPage = Memory - EFI_PAGES_TO_SIZE (1);\r
790 GuardBitmap = GetGuardedMemoryBits (Memory - EFI_PAGES_TO_SIZE (2), 2);\r
791 if ((GuardBitmap & BIT1) == 0) {\r
792 //\r
793 // Head Guard exists.\r
794 //\r
795 if ((GuardBitmap & BIT0) == 0) {\r
796 //\r
797 // If the head Guard is not a tail Guard of adjacent memory block,\r
798 // unset it.\r
799 //\r
800 UnsetGuardPage (GuardPage);\r
801 }\r
802 } else {\r
803 //\r
804 // Pages before memory to free are still in Guard. It's a partial free\r
805 // case. Turn first page of memory block to free into a new Guard.\r
806 //\r
807 SetGuardPage (Memory);\r
808 }\r
809\r
810 //\r
811 // Tail Guard must be the page after this memory block to free, if any.\r
812 //\r
813 // MSB-> 1 0 <-LSB\r
814 // --------------------\r
815 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)\r
816 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)\r
817 // X 1 -> Don't free last page (need a new Guard)\r
818 // (it'll be turned into a Guard page later)\r
819 // --------------------\r
820 // +1 +0 <- End\r
821 //\r
822 GuardPage = Memory + EFI_PAGES_TO_SIZE (NumberOfPages);\r
823 GuardBitmap = GetGuardedMemoryBits (GuardPage, 2);\r
824 if ((GuardBitmap & BIT0) == 0) {\r
825 //\r
826 // Tail Guard exists.\r
827 //\r
828 if ((GuardBitmap & BIT1) == 0) {\r
829 //\r
830 // If the tail Guard is not a head Guard of adjacent memory block,\r
831 // free it; otherwise, keep it.\r
832 //\r
833 UnsetGuardPage (GuardPage);\r
834 }\r
835 } else {\r
836 //\r
837 // Pages after memory to free are still in Guard. It's a partial free\r
838 // case. We need to keep one page to be a head Guard.\r
839 //\r
840 SetGuardPage (GuardPage - EFI_PAGES_TO_SIZE (1));\r
841 }\r
842\r
843 //\r
844 // No matter what, we just clear the mark of the Guarded memory.\r
845 //\r
846 ClearGuardedMemoryBits(Memory, NumberOfPages);\r
847}\r
848\r
849/**\r
850 Adjust address of free memory according to existing and/or required Guard.\r
851\r
852 This function will check if there are existing Guard pages of adjacent\r
853 memory blocks, and try to use them as the Guard pages of the memory to be\r
854 allocated.\r
855\r
856 @param[in] Start Start address of free memory block.\r
857 @param[in] Size Size of free memory block.\r
858 @param[in] SizeRequested Size of memory to allocate.\r
859\r
860 @return The end address of memory block found.\r
861 @return 0 if there is not enough space for the required size of memory and its Guard.\r
862**/\r
863UINT64\r
864AdjustMemoryS (\r
865 IN UINT64 Start,\r
866 IN UINT64 Size,\r
867 IN UINT64 SizeRequested\r
868 )\r
869{\r
870 UINT64 Target;\r
871\r
872 Target = Start + Size - SizeRequested;\r
873\r
874 //\r
875 // At least one more page needed for Guard page.\r
876 //\r
877 if (Size < (SizeRequested + EFI_PAGES_TO_SIZE (1))) {\r
878 return 0;\r
879 }\r
880\r
881 if (!IsGuardPage (Start + Size)) {\r
882 // No Guard at tail to share. One more page is needed.\r
883 Target -= EFI_PAGES_TO_SIZE (1);\r
884 }\r
885\r
886 // Out of range?\r
887 if (Target < Start) {\r
888 return 0;\r
889 }\r
890\r
891 // At the edge?\r
892 if (Target == Start) {\r
893 if (!IsGuardPage (Target - EFI_PAGES_TO_SIZE (1))) {\r
894 // Not enough space for a new head Guard if there is no Guard at head to share.\r
895 return 0;\r
896 }\r
897 }\r
898\r
899 // OK, we have enough pages for the memory and its Guards. Return the end of\r
900 // the free space.\r
901 return Target + SizeRequested - 1;\r
902}\r
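//\r
// Worked example (illustrative, not part of the build): for a free block of\r
// 10 pages with SizeRequested of 2 pages and no existing Guard just above the\r
// block, Target is moved down by one extra page, so the function returns the\r
// last byte of page 8 (counting from 0). Pages 7 and 8 hold the data, page 9\r
// is left for the new tail Guard and page 6 becomes the head Guard.\r
//\r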
903\r
904/**\r
905 Adjust the start address and number of pages to free according to Guard.\r
906\r
907 The purpose of this function is to keep the Guard page shared with the\r
908 adjacent memory block if it is still guarded, or free it if it is no longer\r
909 shared. It also reserves pages as Guard pages in a partial-free situation.\r
910\r
911 @param[in,out] Memory Base address of memory to free.\r
912 @param[in,out] NumberOfPages Size of memory to free.\r
913\r
914 @return VOID.\r
915**/\r
916VOID\r
917AdjustMemoryF (\r
918 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
919 IN OUT UINTN *NumberOfPages\r
920 )\r
921{\r
922 EFI_PHYSICAL_ADDRESS Start;\r
923 EFI_PHYSICAL_ADDRESS MemoryToTest;\r
924 UINTN PagesToFree;\r
925 UINT64 GuardBitmap;\r
926\r
927 if (Memory == NULL || NumberOfPages == NULL || *NumberOfPages == 0) {\r
928 return;\r
929 }\r
930\r
931 Start = *Memory;\r
932 PagesToFree = *NumberOfPages;\r
933\r
934 //\r
935 // Head Guard must be one page before, if any.\r
936 //\r
937 // MSB-> 1 0 <-LSB\r
938 // -------------------\r
939 // Head Guard -> 0 1 -> Don't free Head Guard (shared Guard)\r
940 // Head Guard -> 0 0 -> Free Head Guard either (not shared Guard)\r
941 // 1 X -> Don't free first page (need a new Guard)\r
942 // (it'll be turned into a Guard page later)\r
943 // -------------------\r
944 // Start -> -1 -2\r
945 //\r
946 MemoryToTest = Start - EFI_PAGES_TO_SIZE (2);\r
947 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);\r
948 if ((GuardBitmap & BIT1) == 0) {\r
949 //\r
950 // Head Guard exists.\r
951 //\r
952 if ((GuardBitmap & BIT0) == 0) {\r
953 //\r
954 // If the head Guard is not a tail Guard of adjacent memory block,\r
955 // free it; otherwise, keep it.\r
956 //\r
957 Start -= EFI_PAGES_TO_SIZE (1);\r
958 PagesToFree += 1;\r
959 }\r
960 } else {\r
961 //\r
962 // No Head Guard, and pages before memory to free are still in Guard. It's a\r
963 // partial free case. We need to keep one page to be a tail Guard.\r
964 //\r
965 Start += EFI_PAGES_TO_SIZE (1);\r
966 PagesToFree -= 1;\r
967 }\r
968\r
969 //\r
970 // Tail Guard must be the page after this memory block to free, if any.\r
971 //\r
972 // MSB-> 1 0 <-LSB\r
973 // --------------------\r
974 // 1 0 <- Tail Guard -> Don't free Tail Guard (shared Guard)\r
975 // 0 0 <- Tail Guard -> Free Tail Guard either (not shared Guard)\r
976 // X 1 -> Don't free last page (need a new Guard)\r
977 // (it'll be turned into a Guard page later)\r
978 // --------------------\r
979 // +1 +0 <- End\r
980 //\r
981 MemoryToTest = Start + EFI_PAGES_TO_SIZE (PagesToFree);\r
982 GuardBitmap = GetGuardedMemoryBits (MemoryToTest, 2);\r
983 if ((GuardBitmap & BIT0) == 0) {\r
984 //\r
985 // Tail Guard exists.\r
986 //\r
987 if ((GuardBitmap & BIT1) == 0) {\r
988 //\r
989 // If the tail Guard is not a head Guard of adjacent memory block,\r
990 // free it; otherwise, keep it.\r
991 //\r
992 PagesToFree += 1;\r
993 }\r
994 } else if (PagesToFree > 0) {\r
995 //\r
996 // No Tail Guard, and pages after memory to free are still in Guard. It's a\r
997 // partial free case. We need to keep one page to be a head Guard.\r
998 //\r
999 PagesToFree -= 1;\r
1000 }\r
1001\r
1002 *Memory = Start;\r
1003 *NumberOfPages = PagesToFree;\r
1004}\r
1005\r
1006/**\r
1007 Adjust the base and number of pages to actually allocate according to the Guard.\r
1008\r
1009 @param[in,out] Memory Base address of free memory.\r
1010 @param[in,out] NumberOfPages Size of memory to allocate.\r
1011\r
1012 @return VOID.\r
1013**/\r
1014VOID\r
1015AdjustMemoryA (\r
1016 IN OUT EFI_PHYSICAL_ADDRESS *Memory,\r
1017 IN OUT UINTN *NumberOfPages\r
1018 )\r
1019{\r
1020 //\r
1021 // FindFreePages() has already taken the Guard into account. It's safe to\r
1022 // adjust the start address and/or number of pages here, to make sure that\r
1023 // the Guards are also "allocated".\r
1024 //\r
1025 if (!IsGuardPage (*Memory + EFI_PAGES_TO_SIZE (*NumberOfPages))) {\r
1026 // No tail Guard, add one.\r
1027 *NumberOfPages += 1;\r
1028 }\r
1029\r
1030 if (!IsGuardPage (*Memory - EFI_PAGE_SIZE)) {\r
1031 // No head Guard, add one.\r
1032 *Memory -= EFI_PAGE_SIZE;\r
1033 *NumberOfPages += 1;\r
1034 }\r
1035}\r
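//\r
// Illustrative effect (not part of the build): a request for N pages with no\r
// shareable Guard on either side grows to N + 2 pages starting one page\r
// lower, so the head and tail Guard pages are converted together with the\r
// data pages; a side that already has an adjacent Guard is left unchanged\r
// and shared.\r
//\r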
1036\r
1037/**\r
1038 Adjust the pool head position to make sure the Guard page is adjacent to\r
1039 pool tail or pool head.\r
1040\r
1041 @param[in] Memory Base address of memory allocated.\r
1042 @param[in] NoPages Number of pages actually allocated.\r
1043 @param[in] Size Size of memory requested.\r
1044 (plus pool head/tail overhead)\r
1045\r
1046 @return Address of pool head.\r
1047**/\r
1048VOID *\r
1049AdjustPoolHeadA (\r
1050 IN EFI_PHYSICAL_ADDRESS Memory,\r
1051 IN UINTN NoPages,\r
1052 IN UINTN Size\r
1053 )\r
1054{\r
1055 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
1056 //\r
1057 // Pool head is put near the head Guard\r
1058 //\r
1059 return (VOID *)(UINTN)Memory;\r
1060 }\r
1061\r
1062 //\r
1063 // Pool head is put near the tail Guard\r
1064 //\r
1065 return (VOID *)(UINTN)(Memory + EFI_PAGES_TO_SIZE (NoPages) - Size);\r
1066}\r
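//\r
// Illustrative effect (not part of the build): with BIT7 of\r
// PcdHeapGuardPropertyMask clear, the returned pool head is placed so the\r
// buffer ends right at the tail Guard, catching overflows immediately; with\r
// BIT7 set, the pool head starts at the page base next to the head Guard,\r
// catching underflows instead.\r
//\r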
1067\r
1068/**\r
1069 Get the page base address according to pool head address.\r
1070\r
1071 @param[in] Memory Head address of pool to free.\r
1072\r
1073 @return Address of pool head.\r
1074**/\r
1075VOID *\r
1076AdjustPoolHeadF (\r
1077 IN EFI_PHYSICAL_ADDRESS Memory\r
1078 )\r
1079{\r
1080 if ((PcdGet8 (PcdHeapGuardPropertyMask) & BIT7) != 0) {\r
1081 //\r
1082 // Pool head is put near the head Guard\r
1083 //\r
1084 return (VOID *)(UINTN)Memory;\r
1085 }\r
1086\r
1087 //\r
1088 // Pool head is put near the tail Guard\r
1089 //\r
1090 return (VOID *)(UINTN)(Memory & ~EFI_PAGE_MASK);\r
1091}\r
1092\r
1093/**\r
1094 Allocate or free guarded memory.\r
1095\r
1096 @param[in] Start Start address of memory to allocate or free.\r
1097 @param[in] NumberOfPages Memory size in pages.\r
1098 @param[in] NewType Memory type to convert to.\r
1099\r
1100 @return EFI_STATUS returned by CoreConvertPages(), or EFI_SUCCESS if nothing is left to convert.\r
1101**/\r
1102EFI_STATUS\r
1103CoreConvertPagesWithGuard (\r
1104 IN UINT64 Start,\r
1105 IN UINTN NumberOfPages,\r
1106 IN EFI_MEMORY_TYPE NewType\r
1107 )\r
1108{\r
1109 if (NewType == EfiConventionalMemory) {\r
1110 AdjustMemoryF (&Start, &NumberOfPages);\r
1111 if (NumberOfPages == 0) {\r
1112 return EFI_SUCCESS;\r
1113 }\r
1114 } else {\r
1115 AdjustMemoryA (&Start, &NumberOfPages);\r
1116 }\r
1117\r
1118 return CoreConvertPages (Start, NumberOfPages, NewType);\r
1119}\r
1120\r
1121/**\r
1122 Helper function to convert a UINT64 value into its binary string representation.\r
1123\r
1124 @param[in] Value Value of a UINT64 integer.\r
1125 @param[out] BinString String buffer to contain the conversion result; it must hold at least 65 bytes (64 digits plus a null terminator).\r
1126\r
1127 @return VOID.\r
1128**/\r
1129VOID\r
1130Uint64ToBinString (\r
1131 IN UINT64 Value,\r
1132 OUT CHAR8 *BinString\r
1133 )\r
1134{\r
1135 UINTN Index;\r
1136\r
1137 if (BinString == NULL) {\r
1138 return;\r
1139 }\r
1140\r
1141 for (Index = 64; Index > 0; --Index) {\r
1142 BinString[Index - 1] = '0' + (Value & 1);\r
1143 Value = RShiftU64 (Value, 1);\r
1144 }\r
1145 BinString[64] = '\0';\r
1146}\r
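//\r
// Usage example (illustrative, not part of the build):\r
//\r
//   CHAR8 String[65];                  // 64 binary digits plus terminator\r
//   Uint64ToBinString (0x5, String);   // yields 61 '0' characters then "101"\r
//\r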
1147\r
1148/**\r
1149 Dump the guarded memory bit map.\r
1150**/\r
1151VOID\r
1152EFIAPI\r
1153DumpGuardedMemoryBitmap (\r
1154 VOID\r
1155 )\r
1156{\r
1157 UINTN Entries[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1158 UINTN Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1159 UINTN Indices[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1160 UINT64 Tables[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1161 UINT64 Addresses[GUARDED_HEAP_MAP_TABLE_DEPTH];\r
1162 UINT64 TableEntry;\r
1163 UINT64 Address;\r
1164 INTN Level;\r
1165 UINTN RepeatZero;\r
1166 CHAR8 String[GUARDED_HEAP_MAP_ENTRY_BITS + 1];\r
1167 CHAR8 *Ruler1;\r
1168 CHAR8 *Ruler2;\r
1169\r
1170 if (mGuardedMemoryMap == 0 ||\r
1171 mMapLevel == 0 ||\r
1172 mMapLevel > GUARDED_HEAP_MAP_TABLE_DEPTH) {\r
1173 return;\r
1174 }\r
1175\r
1176 Ruler1 = " 3 2 1 0";\r
1177 Ruler2 = "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210";\r
1178\r
1179 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "============================="\r
1180 " Guarded Memory Bitmap "\r
1181 "==============================\r\n"));\r
1182 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler1));\r
1183 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, " %a\r\n", Ruler2));\r
1184\r
1185 CopyMem (Entries, mLevelMask, sizeof (Entries));\r
1186 CopyMem (Shifts, mLevelShift, sizeof (Shifts));\r
1187\r
1188 SetMem (Indices, sizeof(Indices), 0);\r
1189 SetMem (Tables, sizeof(Tables), 0);\r
1190 SetMem (Addresses, sizeof(Addresses), 0);\r
1191\r
1192 Level = GUARDED_HEAP_MAP_TABLE_DEPTH - mMapLevel;\r
1193 Tables[Level] = mGuardedMemoryMap;\r
1194 Address = 0;\r
1195 RepeatZero = 0;\r
1196\r
1197 while (TRUE) {\r
1198 if (Indices[Level] > Entries[Level]) {\r
1199\r
1200 Tables[Level] = 0;\r
1201 Level -= 1;\r
1202 RepeatZero = 0;\r
1203\r
1204 DEBUG ((\r
1205 HEAP_GUARD_DEBUG_LEVEL,\r
1206 "========================================="\r
1207 "=========================================\r\n"\r
1208 ));\r
1209\r
1210 } else {\r
1211\r
1212 TableEntry = ((UINT64 *)(UINTN)Tables[Level])[Indices[Level]];\r
1213 Address = Addresses[Level];\r
1214\r
1215 if (TableEntry == 0) {\r
1216\r
1217 if (Level == GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1218 if (RepeatZero == 0) {\r
1219 Uint64ToBinString(TableEntry, String);\r
1220 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));\r
1221 } else if (RepeatZero == 1) {\r
1222 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "... : ...\r\n"));\r
1223 }\r
1224 RepeatZero += 1;\r
1225 }\r
1226\r
1227 } else if (Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1) {\r
1228\r
1229 Level += 1;\r
1230 Tables[Level] = TableEntry;\r
1231 Addresses[Level] = Address;\r
1232 Indices[Level] = 0;\r
1233 RepeatZero = 0;\r
1234\r
1235 continue;\r
1236\r
1237 } else {\r
1238\r
1239 RepeatZero = 0;\r
1240 Uint64ToBinString(TableEntry, String);\r
1241 DEBUG ((HEAP_GUARD_DEBUG_LEVEL, "%016lx: %a\r\n", Address, String));\r
1242\r
1243 }\r
1244 }\r
1245\r
1246 if (Level < (GUARDED_HEAP_MAP_TABLE_DEPTH - (INTN)mMapLevel)) {\r
1247 break;\r
1248 }\r
1249\r
1250 Indices[Level] += 1;\r
1251 Address = (Level == 0) ? 0 : Addresses[Level - 1];\r
1252 Addresses[Level] = Address | LShiftU64(Indices[Level], Shifts[Level]);\r
1253\r
1254 }\r
1255}\r
1256\r