/** @file
  Data type, macros and function prototypes of heap guard feature.

Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#ifndef _HEAPGUARD_H_
#define _HEAPGUARD_H_

//
// The following macros are used to define and access the guarded memory
// bitmap table.
//
// To simplify the access and reduce the memory used for this table, the
// table is constructed in a similar way as a page table structure, but in
// the reverse direction, i.e. growing from the bottom up to the top.
//
//    - 1 bit tracks 1 page (4KB)
//    - 1 UINT64 map entry tracks 256KB of memory
//    - 1K-UINT64 map table tracks 256MB of memory
//    - Five levels of tables can track any address of memory on a 64-bit
//      system, as below.
//
//    512   *   512   *   512   *   512    *    1K    *  64b  *     4K
//    111111111 111111111 111111111 111111111 1111111111 111111 111111111111
//    63        54        45        36        27         17     11         0
//       9b        9b        9b        9b         10b       6b       12b
//       L0   ->   L1   ->   L2   ->   L3   ->    L4   ->  bits ->   page
//      1FF       1FF       1FF       1FF         3FF       3F        FFF
//
// An L4 table has 1K * sizeof(UINT64) = 8K (2 pages), which can track 256MB
// of memory. Each table of L0-L3 will be allocated only when its memory
// address range is to be tracked, and only 1 page is allocated each time.
// This saves the memory needed to establish this map table.
//
// For a normal system configuration with 4G of memory, two levels of tables
// can track the whole memory, because two levels (L3+L4) of map tables
// already cover 37 bits of memory address (512 L3 entries * 256MB per L4
// table = 128GB = 2^37). And for a normal UEFI BIOS, less than 128M of
// memory would be consumed during boot. That means we just need
//
//   1 page (L3) + 2 pages (L4)
//
// of memory (3 pages) to track the memory allocations. In this case, there
// is no need to set up the L0-L2 tables.
//

//
// Each entry occupies 8B/64b. 1 page can hold 512 entries, which span 9
// bits of address. (512 = 1 << 9)
//
#define BYTE_LENGTH_SHIFT  3              // (8 = 1 << 3)

#define GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT \
        (EFI_PAGE_SHIFT - BYTE_LENGTH_SHIFT)

#define GUARDED_HEAP_MAP_TABLE_DEPTH  5

// Use UINT64_index + bit_index_of_UINT64 to locate the bit in the map
#define GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT  6 // (64 = 1 << 6)

#define GUARDED_HEAP_MAP_ENTRY_BITS \
        (1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)

#define GUARDED_HEAP_MAP_ENTRY_BYTES \
        (GUARDED_HEAP_MAP_ENTRY_BITS / 8)

// L4 table address width: 64 - 9 * 4 - 6 - 12 = 10b
#define GUARDED_HEAP_MAP_ENTRY_SHIFT \
        (GUARDED_HEAP_MAP_ENTRY_BITS \
         - GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 4 \
         - GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
         - EFI_PAGE_SHIFT)

// L4 table address mask: (1 << 10) - 1 = 0x3FF
#define GUARDED_HEAP_MAP_ENTRY_MASK \
        ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1)

// Size of each L4 table: (1 << 10) * 8 = 8KB = 2 pages
#define GUARDED_HEAP_MAP_SIZE \
        ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) * GUARDED_HEAP_MAP_ENTRY_BYTES)

// Memory size tracked by one L4 table: 8KB * 8 * 4KB = 256MB
#define GUARDED_HEAP_MAP_UNIT_SIZE \
        (GUARDED_HEAP_MAP_SIZE * 8 * EFI_PAGE_SIZE)

// L4 table entry number: 8KB / 8 = 1024
#define GUARDED_HEAP_MAP_ENTRIES_PER_UNIT \
        (GUARDED_HEAP_MAP_SIZE / GUARDED_HEAP_MAP_ENTRY_BYTES)

// L4 table entry indexing
#define GUARDED_HEAP_MAP_ENTRY_INDEX(Address) \
        (RShiftU64 (Address, EFI_PAGE_SHIFT \
                             + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) \
         & GUARDED_HEAP_MAP_ENTRY_MASK)

// L4 table entry bit indexing
#define GUARDED_HEAP_MAP_ENTRY_BIT_INDEX(Address) \
        (RShiftU64 (Address, EFI_PAGE_SHIFT) \
         & ((1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) - 1))

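//
// Illustrative sketch only (not part of this header's API): assuming
// "MapTable" is a hypothetical pointer to the 8KB L4 map table that already
// tracks the 256MB region containing a page-aligned Address, the two macros
// above could be combined to test that page's tracking bit roughly as below.
//
//   UINT64   *MapTable;   // hypothetical L4 table for the region of Address
//   UINT64   Address;     // page-aligned address inside the tracked region
//   UINTN    EntryIndex;  // which UINT64 entry inside the L4 table
//   UINTN    BitIndex;    // which bit inside that UINT64 entry
//   BOOLEAN  Guarded;
//
//   EntryIndex = (UINTN)GUARDED_HEAP_MAP_ENTRY_INDEX (Address);
//   BitIndex   = (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address);
//   Guarded    = (BOOLEAN)((RShiftU64 (MapTable[EntryIndex], BitIndex) & 1) != 0);
//
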
//
// Total bits (pages) tracked by one L4 table (65536-bit)
//
#define GUARDED_HEAP_MAP_BITS \
        (1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
               + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT))

//
// Bit indexing inside the whole L4 table (0 - 65535)
//
#define GUARDED_HEAP_MAP_BIT_INDEX(Address) \
        (RShiftU64 (Address, EFI_PAGE_SHIFT) \
         & ((1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
                   + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)) - 1))

//
// Memory address bit width tracked by L4 table: 10 + 6 + 12 = 28
//
#define GUARDED_HEAP_MAP_TABLE_SHIFT \
        (GUARDED_HEAP_MAP_ENTRY_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
         + EFI_PAGE_SHIFT)

//
// Macro used to initialize the local array variable for map table traversal:
// {55, 46, 37, 28, 18}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS \
  { \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 3, \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 2, \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT, \
    GUARDED_HEAP_MAP_TABLE_SHIFT, \
    EFI_PAGE_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
  }

//
// Masks used to extract the table index of each level from an address:
// {0x1FF, 0x1FF, 0x1FF, 0x1FF, 0x3FF}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS \
  { \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
    (1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1 \
  }

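//
// Illustrative sketch only (a minimal, hypothetical walk driven by the two
// arrays above; the real traversal lives in HeapGuard.c and also handles
// on-demand allocation of missing tables and a map that has not yet grown
// to the full five levels).
//
//   UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH] = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
//   UINTN   Masks[GUARDED_HEAP_MAP_TABLE_DEPTH]  = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
//   UINT64  Address;  // address being looked up
//   UINT64  *Table;   // hypothetical root table (L0) of a fully grown map
//   UINTN   Level;
//   UINTN   Index;
//
//   for (Level = 0; Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1; Level++) {
//     Index = (UINTN)(RShiftU64 (Address, Shifts[Level]) & Masks[Level]);
//     Table = (UINT64 *)(UINTN)Table[Index];  // descend; NULL means untracked
//   }
//
//   // Table now points at the L4 table; locate the bit as shown earlier.
//
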
//
// Memory type to guard (matching the related PCD definition)
//
#define GUARD_HEAP_TYPE_PAGE   BIT0
#define GUARD_HEAP_TYPE_POOL   BIT1
#define GUARD_HEAP_TYPE_FREED  BIT4
#define GUARD_HEAP_TYPE_ALL \
        (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_FREED)

//
// Debug message level
//
#define HEAP_GUARD_DEBUG_LEVEL  (DEBUG_POOL|DEBUG_PAGE)

typedef struct {
  UINT32                  TailMark;
  UINT32                  HeadMark;
  EFI_PHYSICAL_ADDRESS    Address;
  LIST_ENTRY              Link;
} HEAP_GUARD_NODE;

/**
  Internal function. Converts a memory range to the specified type.
  The range must exist in the memory map.

  @param  Start                 The first address of the range. Must be page
                                aligned.
  @param  NumberOfPages         The number of pages to convert.
  @param  NewType               The new type for the memory range.

  @retval EFI_INVALID_PARAMETER Invalid parameter.
  @retval EFI_NOT_FOUND         Could not find a descriptor covering the
                                specified range, or the conversion is not
                                allowed.
  @retval EFI_SUCCESS           Successfully converted the memory range to the
                                specified type.

**/
EFI_STATUS
CoreConvertPages (
  IN UINT64           Start,
  IN UINT64           NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  );

/**
  Allocate or free guarded memory.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @return Status of the page conversion.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  );

/**
  Set head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to set guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  );

/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  );

/**
  Adjust the base and number of pages to really allocate according to Guard.

  @param[in,out]  Memory          Base address of free memory.
  @param[in,out]  NumberOfPages   Size of memory to allocate.

  @return VOID.
**/
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  );

/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with the
  adjacent memory block if it is still guarded, or to free it if it is no
  longer shared. Another purpose is to reserve pages as Guard pages when only
  part of a page range is freed.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  );

/**
  Adjust the address of free memory according to existing and/or required
  Guard.

  This function checks if there are existing Guard pages of adjacent memory
  blocks, and tries to use them as the Guard page of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of the memory block found.
  @return 0 if there is not enough space for the required size of memory and
          its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64  Start,
  IN UINT64  Size,
  IN UINT64  SizeRequested
  );

/**
  Check to see if a pool of the given memory type should be guarded or not.

  @param[in]  MemoryType      Pool type to check.

  @return TRUE  The given type of pool should be guarded.
  @return FALSE The given type of pool should not be guarded.
**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE  MemoryType
  );

/**
  Check to see if a page allocation of the given memory type and allocate
  type should be guarded or not.

  @param[in]  MemoryType      Page type to check.
  @param[in]  AllocateType    Allocation type to check.

  @return TRUE  The given type of page should be guarded.
  @return FALSE The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType
  );

/**
  Check to see if the page at the given address is guarded or not.

  @param[in]  Address         The address to check for.

  @return TRUE  The page at Address is guarded.
  @return FALSE The page at Address is not guarded.
**/
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS  Address
  );

/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]  Address         The address to check for.

  @return TRUE  The page at Address is a Guard page.
  @return FALSE The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS  Address
  );

/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  );

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  the pool tail or pool head.

  @param[in]  Memory    Base address of memory allocated.
  @param[in]  NoPages   Number of pages actually allocated.
  @param[in]  Size      Size of memory requested
                        (plus pool head/tail overhead).

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  );

/**
  Get the page base address according to pool head address.

  @param[in]  Memory    Head address of pool to free.

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS  Memory
  );

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @param[in]  GuardType   Specify the sub-type(s) of Heap Guard.

  @return TRUE/FALSE.
**/
BOOLEAN
IsHeapGuardEnabled (
  UINT8  GuardType
  );

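//
// Usage sketch (illustrative only): a caller would typically test the guard
// sub-type(s) it cares about before doing any guard-related work, e.g.
//
//   if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE | GUARD_HEAP_TYPE_POOL)) {
//     // Page and/or pool guard is enabled; apply Guard handling here.
//   }
//
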
/**
  Notify function used to set all Guard pages after the CPU Arch Protocol
  is installed.
**/
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  );

/**
  This function checks to see if the given memory map descriptor in a memory
  map can be merged with any guarded free pages.

  @param  MemoryMapEntry    A pointer to a descriptor in MemoryMap.
  @param  MaxAddress        Maximum address to stop the merge.

  @return VOID

**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR  *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS   MaxAddress
  );

/**
  Record freed pages as well as mark them as not-present, if enabled.

  @param[in]  BaseAddress   Base address of just freed pages.
  @param[in]  Pages         Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPagesChecked (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Pages
  );

/**
  Put some of the guarded free pages (at most 64 pages at a time) back into
  the free page pool.

  The freed-memory guard is used to detect Use-After-Free (UAF) memory issues.
  It takes a 'use then throw away' approach to detect any illegal access to
  freed memory: the thrown-away memory is marked as not-present so that any
  access to it (after free) will be caught by a page-fault exception.

  The problem is that this consumes a lot of memory space. Once no memory is
  left in the pool to allocate, we have to restore part of the freed pages to
  their normal function. Otherwise the whole system would stop functioning.

  @param  StartAddress    Start address of promoted memory.
  @param  EndAddress      End address of promoted memory.

  @return TRUE    Succeeded in promoting memory.
  @return FALSE   No free memory found.

**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS  *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS  *EndAddress
  );

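//
// Usage sketch (illustrative only, not the actual call site): a hypothetical
// out-of-memory path could promote a batch of guarded free pages and then
// hand the recovered range back to the allocator.
//
//   EFI_PHYSICAL_ADDRESS  StartAddress;
//   EFI_PHYSICAL_ADDRESS  EndAddress;
//
//   if (PromoteGuardedFreePages (&StartAddress, &EndAddress)) {
//     // The range from StartAddress to EndAddress is present again and may
//     // be reused as conventional free memory.
//   }
//
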
extern BOOLEAN  mOnGuarding;

#endif