/** @file
  Data type, macros and function prototypes of heap guard feature.

  Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
  This program and the accompanying materials
  are licensed and made available under the terms and conditions of the BSD License
  which accompanies this distribution. The full text of the license may be found at
  http://opensource.org/licenses/bsd-license.php

  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#ifndef _HEAPGUARD_H_
#define _HEAPGUARD_H_

//
// The following macros are used to define and access the guarded memory
// bitmap table.
//
// To simplify access and reduce the memory used for this table, the table
// is constructed in a similar way to a page table structure, but in the
// reverse direction, i.e. growing from the bottom up to the top.
//
// - 1-bit tracks 1 page (4KB)
// - 1-UINT64 map entry tracks 256KB memory
// - 1K-UINT64 map table tracks 256MB memory
// - Five levels of tables can track any memory address of a 64-bit
//   system, as shown below.
//
//    512   *   512   *   512   *   512    *    1K    *  64b  *     4K
//   111111111 111111111 111111111 111111111 1111111111 111111 111111111111
//   63        54        45        36        27         17     11         0
//      9b         9b        9b        9b        10b       6b       12b
//      L0   ->   L1   ->   L2   ->   L3   ->   L4   ->  bits  ->   page
//      1FF       1FF       1FF       1FF       3FF       3F        FFF
//
// An L4 table has 1K * sizeof(UINT64) = 8K (2 pages), which can track 256MB
// of memory. Each L0-L3 table is allocated only when its memory address
// range needs to be tracked, and only 1 page is allocated each time. This
// saves the memory used to establish the map table.
//
// For a typical system configuration with 4GB of memory, two levels of
// tables can track all of it, because two levels of map tables (L3+L4)
// already cover 37 bits of memory address. And since a typical UEFI BIOS
// consumes less than 128MB of memory during boot, only
//
//  1 page (L3) + 2 pages (L4)
//
// of memory (3 pages) is needed to track all memory allocations. In this
// case, there is no need to set up the L0-L2 tables. A worked example of
// the address decomposition follows.
//

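//
// Worked example (illustrative only): the physical address 0x40001000
// decomposes under this scheme as
//
//   Page offset      : 0x40001000 & 0xFFF          = 0   (bits 0-11)
//   Bit in UINT64    : (0x40001000 >> 12) & 0x3F   = 1   (bits 12-17)
//   L4 entry index   : (0x40001000 >> 18) & 0x3FF  = 0   (bits 18-27)
//   L3 entry index   : (0x40001000 >> 28) & 0x1FF  = 4   (bits 28-36)
//   L2/L1/L0 indices : all 0 for any address below 128GB (2^37)
//
// So the page at 0x40001000 is tracked by bit 1 of entry 0 in the L4 map
// reached through entry 4 of the L3 table, which is why a 4GB system only
// ever needs the L3 and L4 levels.
//
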
//
// Each entry occupies 8B/64b. 1-page can hold 512 entries, which spans 9
// bits in address. (512 = 1 << 9)
//
#define BYTE_LENGTH_SHIFT                   3 // (8 = 1 << 3)

#define GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT  \
        (EFI_PAGE_SHIFT - BYTE_LENGTH_SHIFT)

#define GUARDED_HEAP_MAP_TABLE_DEPTH        5

// Use UINT64_index + bit_index_of_UINT64 to locate the bit in the map
#define GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT    6 // (64 = 1 << 6)

#define GUARDED_HEAP_MAP_ENTRY_BITS         \
        (1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)

#define GUARDED_HEAP_MAP_ENTRY_BYTES        \
        (GUARDED_HEAP_MAP_ENTRY_BITS / 8)

// L4 table address width: 64 - 9 * 4 - 6 - 12 = 10b
#define GUARDED_HEAP_MAP_ENTRY_SHIFT              \
        (GUARDED_HEAP_MAP_ENTRY_BITS              \
         - GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 4 \
         - GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT       \
         - EFI_PAGE_SHIFT)

// L4 table address mask: ((1 << 10) - 1) = 0x3FF
#define GUARDED_HEAP_MAP_ENTRY_MASK         \
        ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1)

// Size of each L4 table: (1 << 10) * 8 = 8KB = 2-page
#define GUARDED_HEAP_MAP_SIZE               \
        ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) * GUARDED_HEAP_MAP_ENTRY_BYTES)

// Memory size tracked by one L4 table: 8KB * 8 * 4KB = 256MB
#define GUARDED_HEAP_MAP_UNIT_SIZE          \
        (GUARDED_HEAP_MAP_SIZE * 8 * EFI_PAGE_SIZE)

// L4 table entry number: 8KB / 8 = 1024
#define GUARDED_HEAP_MAP_ENTRIES_PER_UNIT   \
        (GUARDED_HEAP_MAP_SIZE / GUARDED_HEAP_MAP_ENTRY_BYTES)

// L4 table entry indexing
#define GUARDED_HEAP_MAP_ENTRY_INDEX(Address)                     \
        (RShiftU64 (Address, EFI_PAGE_SHIFT                       \
                             + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)  \
         & GUARDED_HEAP_MAP_ENTRY_MASK)

// L4 table entry bit indexing
#define GUARDED_HEAP_MAP_ENTRY_BIT_INDEX(Address)   \
        (RShiftU64 (Address, EFI_PAGE_SHIFT)        \
         & ((1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) - 1))

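//
// Illustrative sketch (not part of this header's API): given "Map", a
// hypothetical UINT64 array of GUARDED_HEAP_MAP_ENTRIES_PER_UNIT entries
// covering the 256MB unit that contains Address, the two macros above
// locate and test the bit tracking the page at Address:
//
//   UINT64   Entry = Map[GUARDED_HEAP_MAP_ENTRY_INDEX (Address)];
//   BOOLEAN  Set   = (BOOLEAN)((RShiftU64 (Entry,
//                       (UINTN)GUARDED_HEAP_MAP_ENTRY_BIT_INDEX (Address)) & 1) != 0);
//
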
//
// Total bits (pages) tracked by one L4 table (65536-bit)
//
#define GUARDED_HEAP_MAP_BITS               \
        (1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
               + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT))

//
// Bit indexing inside the whole L4 table (0 - 65535)
//
#define GUARDED_HEAP_MAP_BIT_INDEX(Address)         \
        (RShiftU64 (Address, EFI_PAGE_SHIFT)        \
         & ((1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT     \
                   + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)) - 1))

//
// Memory address bit width tracked by L4 table: 10 + 6 + 12 = 28
//
#define GUARDED_HEAP_MAP_TABLE_SHIFT        \
        (GUARDED_HEAP_MAP_ENTRY_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
         + EFI_PAGE_SHIFT)

//
// Macro used to initialize the local array variable for map table traversing
// {55, 46, 37, 28, 18}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS                                  \
  {                                                                          \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 3,   \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 2,   \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT,       \
    GUARDED_HEAP_MAP_TABLE_SHIFT,                                            \
    EFI_PAGE_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT                        \
  }

//
// Masks used to extract the table index at each level
// {0x1FF, 0x1FF, 0x1FF, 0x1FF, 0x3FF}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS                \
  {                                                       \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,        \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,        \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,        \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,        \
    (1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1               \
  }

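//
// Illustrative sketch of the table walk these two arrays are meant to drive.
// This is an assumption about typical usage, not a copy of the actual
// implementation in HeapGuard.c; "Table" (a UINT64 *) starts at the root
// map table.
//
//   UINTN  Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH] = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
//   UINTN  Masks[GUARDED_HEAP_MAP_TABLE_DEPTH]  = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
//   UINTN  Level;
//   UINTN  Index;
//
//   for (Level = 0; Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1; Level++) {
//     Index = (UINTN)RShiftU64 (Address, Shifts[Level]) & Masks[Level];
//     if (Table[Index] == 0) {
//       break;                                  // this range is not tracked yet
//     }
//     Table = (UINT64 *)(UINTN)Table[Index];    // descend to the next level
//   }
//   // After the walk, Shifts[4]/Masks[4] select the UINT64 entry within the
//   // final bitmap.
//
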
//
// Memory type to guard (matching the related PCD definition)
//
#define GUARD_HEAP_TYPE_PAGE        BIT0
#define GUARD_HEAP_TYPE_POOL        BIT1
#define GUARD_HEAP_TYPE_FREED       BIT4
#define GUARD_HEAP_TYPE_ALL         \
        (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_FREED)

//
// Debug message level
//
#define HEAP_GUARD_DEBUG_LEVEL      (DEBUG_POOL|DEBUG_PAGE)

typedef struct {
  UINT32                TailMark;
  UINT32                HeadMark;
  EFI_PHYSICAL_ADDRESS  Address;
  LIST_ENTRY            Link;
} HEAP_GUARD_NODE;

/**
  Internal function. Converts a memory range to the specified type.
  The range must exist in the memory map.

  @param Start                   The first address of the range. Must be page
                                 aligned.
  @param NumberOfPages           The number of pages to convert.
  @param NewType                 The new type for the memory range.

  @retval EFI_INVALID_PARAMETER  Invalid parameter.
  @retval EFI_NOT_FOUND          Could not find a descriptor that covers the
                                 specified range, or the conversion is not
                                 allowed.
  @retval EFI_SUCCESS            Successfully converted the memory range to the
                                 specified type.

**/
EFI_STATUS
CoreConvertPages (
  IN UINT64           Start,
  IN UINT64           NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  );

/**
  Allocate or free guarded memory.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @return Status of the page conversion.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  );

/**
  Set head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to set guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  );

/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  );

/**
  Adjust the base address and number of pages to actually allocate according
  to Guard.

  @param[in,out]  Memory          Base address of free memory.
  @param[in,out]  NumberOfPages   Size of memory to allocate.

  @return VOID.
**/
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  );

/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep the shared Guard page with the
  adjacent memory block if it is still guarded, or to free it if it is no
  longer shared. It also reserves pages as Guard pages when only part of a
  page range is freed.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  );

/**
  Adjust the address of free memory according to existing and/or required Guard.

  This function checks whether there are existing Guard pages of adjacent
  memory blocks, and tries to use them as the Guard pages of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of memory block found.
  @return 0 if there is not enough space for the requested size of memory and
          its Guard.
**/
UINT64
AdjustMemoryS (
  IN UINT64  Start,
  IN UINT64  Size,
  IN UINT64  SizeRequested
  );

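//
// Illustrative sketch (an assumption, not the actual allocator code): a
// free-page search might call AdjustMemoryS() on each candidate free range
// to check whether the request plus its Guard pages fits. "RangeStart",
// "RangeSize" and "Pages" are hypothetical locals.
//
//   UINT64  End;
//
//   End = AdjustMemoryS (RangeStart, RangeSize, EFI_PAGES_TO_SIZE (Pages));
//   if (End != 0) {
//     // Allocate the pages ending at End; Guard pages can then be applied
//     // with SetGuardForMemory().
//   }
//
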
/**
  Check to see if pool of the given memory type should be guarded or not.

  @param[in]  MemoryType      Pool type to check.

  @return TRUE  The given type of pool should be guarded.
  @return FALSE The given type of pool should not be guarded.
**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE  MemoryType
  );

/**
  Check to see if pages of the given memory type and allocation type should be
  guarded or not.

  @param[in]  MemoryType      Page type to check.
  @param[in]  AllocateType    Allocation type to check.

  @return TRUE  The given type of page should be guarded.
  @return FALSE The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE    MemoryType,
  IN EFI_ALLOCATE_TYPE  AllocateType
  );

/**
  Check to see if the page at the given address is guarded or not.

  @param[in]  Address         The address to check for.

  @return TRUE  The page at Address is guarded.
  @return FALSE The page at Address is not guarded.
**/
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS  Address
  );

/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]  Address         The address to check for.

  @return TRUE  The page at Address is a Guard page.
  @return FALSE The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS  Address
  );

/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  );

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  the pool tail or pool head.

  @param[in]  Memory    Base address of memory allocated.
  @param[in]  NoPages   Number of pages actually allocated.
  @param[in]  Size      Size of memory requested,
                        plus pool head/tail overhead.

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  );

/**
  Get the page base address according to pool head address.

  @param[in]  Memory    Head address of pool to free.

  @return Base address of the page(s) backing the pool.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS  Memory
  );

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @param[in]  GuardType   Specify the sub-type(s) of Heap Guard.

  @return TRUE  The specified type(s) of Heap Guard are enabled.
  @return FALSE The specified type(s) of Heap Guard are not enabled.
**/
BOOLEAN
IsHeapGuardEnabled (
  UINT8  GuardType
  );

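//
// Illustrative sketch: callers pass one or more of the GUARD_HEAP_TYPE_*
// bits defined above. For example, code that should only run when the
// freed-memory guard is active could be wrapped as follows:
//
//   if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
//     // Freed-memory (use-after-free) guard is enabled.
//   }
//
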
/**
  Notify function used to set all Guard pages after the CPU Arch Protocol
  has been installed.
**/
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  );

/**
  This function checks to see if the given memory map descriptor in a memory
  map can be merged with any guarded free pages.

  @param  MemoryMapEntry    A pointer to a descriptor in MemoryMap.
  @param  MaxAddress        Maximum address to stop the merge.

  @return VOID

**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR  *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS   MaxAddress
  );

/**
  Record freed pages as well as mark them as not-present, if enabled.

  @param[in]  BaseAddress   Base address of just freed pages.
  @param[in]  Pages         Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPagesChecked (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 Pages
  );

/**
  Put part (at most 64 pages at a time) of the guarded free pages back into
  the free page pool.

  The freed-memory guard is used to detect Use-After-Free (UAF) memory issues.
  It takes a "use once, then throw away" approach: freed memory is marked as
  not-present, so that any access to it after the free is caught by a
  page-fault exception.

  The problem is that this consumes a lot of memory. Once there is no memory
  left in the pool to allocate, part of the freed pages must be restored to
  normal use; otherwise the whole system stops functioning.

  @param  StartAddress    Start address of promoted memory.
  @param  EndAddress      End address of promoted memory.

  @return TRUE    Succeeded in promoting memory.
  @return FALSE   No free memory found.

**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS  *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS  *EndAddress
  );

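//
// Illustrative sketch (an assumption about typical usage): when the page
// allocator runs out of memory it may fall back to promoting guarded free
// pages. "Start" and "End" are hypothetical locals.
//
//   EFI_PHYSICAL_ADDRESS  Start;
//   EFI_PHYSICAL_ADDRESS  End;
//
//   if (PromoteGuardedFreePages (&Start, &End)) {
//     // [Start, End) has been restored to normal use and can be returned
//     // to the free page list before retrying the allocation.
//   }
//
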
extern BOOLEAN  mOnGuarding;

#endif