/** @file
  Data structure and functions to allocate and free memory space.

  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
  SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#ifndef _HEAPGUARD_H_
#define _HEAPGUARD_H_

#include "PiSmmCore.h"

//
// The following macros are used to define and access the guarded memory
// bitmap table.
//
// To simplify access and reduce the memory used for this table, the table is
// constructed in a similar way to a page table structure, but in the reverse
// direction, i.e. growing from the bottom up to the top.
//
//    - 1 bit tracks 1 page (4KB)
//    - 1 UINT64 map entry tracks 256KB of memory
//    - 1K-UINT64 map table tracks 256MB of memory
//    - Five levels of tables can track any memory address of a 64-bit
//      system, as shown below.
//
//      512   *   512   *   512   *   512    *    1K    *  64b  *     4K
//   111111111 111111111 111111111 111111111 1111111111 111111 111111111111
//   63        54        45        36        27         17     11         0
//      9b        9b        9b        9b        10b        6b       12b
//      L0  ->    L1  ->    L2  ->    L3  ->    L4   ->   bits ->   page
//     1FF       1FF       1FF       1FF       3FF        3F        FFF
//
// An L4 table has 1K * sizeof(UINT64) = 8KB (2 pages) and can track 256MB of
// memory. Each L0-L3 table is allocated only when its memory address range
// needs to be tracked, and only 1 page is allocated each time. This saves
// the memory used to establish the map table.
//
// For a typical system configuration with 4GB of memory, two levels of
// tables can track all of it, because two levels of map tables (L3+L4)
// already cover 37 bits of memory address. And since a typical UEFI BIOS
// consumes less than 128MB of memory during boot, we only need
//
//    1 page (L3) + 2 pages (L4)
//
// of memory (3 pages) to track all memory allocations. In this case, there
// is no need to set up the L0-L2 tables.
//

//
// Each entry occupies 8B/64b. 1-page can hold 512 entries, which spans 9
// bits in address. (512 = 1 << 9)
//
#define BYTE_LENGTH_SHIFT  3  // (8 = 1 << 3)

#define GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT \
  (EFI_PAGE_SHIFT - BYTE_LENGTH_SHIFT)

#define GUARDED_HEAP_MAP_TABLE_DEPTH  5

// Use UINT64_index + bit_index_of_UINT64 to locate the bit in the map
#define GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT  6  // (64 = 1 << 6)

#define GUARDED_HEAP_MAP_ENTRY_BITS \
  (1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)

#define GUARDED_HEAP_MAP_ENTRY_BYTES \
  (GUARDED_HEAP_MAP_ENTRY_BITS / 8)

// L4 table address width: 64 - 9 * 4 - 6 - 12 = 10b
#define GUARDED_HEAP_MAP_ENTRY_SHIFT \
  (GUARDED_HEAP_MAP_ENTRY_BITS \
    - GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 4 \
    - GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
    - EFI_PAGE_SHIFT)

// L4 table address mask: ((1 << 10) - 1) = 0x3FF
#define GUARDED_HEAP_MAP_ENTRY_MASK \
  ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1)

// Size of each L4 table: (1 << 10) * 8 = 8KB = 2-page
#define GUARDED_HEAP_MAP_SIZE \
  ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) * GUARDED_HEAP_MAP_ENTRY_BYTES)

// Memory size tracked by one L4 table: 8KB * 8 * 4KB = 256MB
#define GUARDED_HEAP_MAP_UNIT_SIZE \
  (GUARDED_HEAP_MAP_SIZE * 8 * EFI_PAGE_SIZE)

// L4 table entry number: 8KB / 8 = 1024
#define GUARDED_HEAP_MAP_ENTRIES_PER_UNIT \
  (GUARDED_HEAP_MAP_SIZE / GUARDED_HEAP_MAP_ENTRY_BYTES)

// L4 table entry indexing
#define GUARDED_HEAP_MAP_ENTRY_INDEX(Address) \
  (RShiftU64 (Address, EFI_PAGE_SHIFT \
    + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) \
    & GUARDED_HEAP_MAP_ENTRY_MASK)

// L4 table entry bit indexing
#define GUARDED_HEAP_MAP_ENTRY_BIT_INDEX(Address) \
  (RShiftU64 (Address, EFI_PAGE_SHIFT) \
    & ((1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) - 1))

//
// Total bits (pages) tracked by one L4 table (65536-bit)
//
#define GUARDED_HEAP_MAP_BITS \
  (1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
    + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT))

//
// Bit indexing inside the whole L4 table (0 - 65535)
//
#define GUARDED_HEAP_MAP_BIT_INDEX(Address) \
  (RShiftU64 (Address, EFI_PAGE_SHIFT) \
    & ((1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT \
    + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)) - 1))

//
// Memory address bit width tracked by L4 table: 10 + 6 + 12 = 28
//
#define GUARDED_HEAP_MAP_TABLE_SHIFT \
  (GUARDED_HEAP_MAP_ENTRY_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
    + EFI_PAGE_SHIFT)

//
// Macro used to initialize the local array variable for map table traversing
// {55, 46, 37, 28, 18}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS \
  { \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 3, \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 2, \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT, \
    GUARDED_HEAP_MAP_TABLE_SHIFT, \
    EFI_PAGE_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT \
  }

//
// Masks used to extract address range of each level of table
// {0x1FF, 0x1FF, 0x1FF, 0x1FF, 0x3FF}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS \
  { \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1, \
    (1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1 \
  }
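
//
// Illustrative sketch (not part of the original header): how the shift and
// mask arrays above are meant to be consumed when walking the five table
// levels for an address. The function name is hypothetical; the real
// traversal in HeapGuard.c also follows the stored table pointers and
// allocates missing tables, which is omitted here.
//
STATIC
VOID
ExampleDecomposeGuardedMapAddress (
  IN  EFI_PHYSICAL_ADDRESS  Address,
  OUT UINTN                 Indices[GUARDED_HEAP_MAP_TABLE_DEPTH]
  )
{
  UINTN  Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH] = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
  UINTN  Masks[GUARDED_HEAP_MAP_TABLE_DEPTH]  = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
  UINTN  Level;

  //
  // Levels 0-3 yield 9-bit indices into the L0-L3 tables (mask 0x1FF);
  // level 4 yields the 10-bit index of the UINT64 entry inside the L4 unit
  // (mask 0x3FF). For example, for Address = 0x12345000:
  //   L4 index = (0x12345000 >> 18) & 0x3FF = 0x8D
  //
  for (Level = 0; Level < GUARDED_HEAP_MAP_TABLE_DEPTH; Level++) {
    Indices[Level] = (UINTN)(RShiftU64 (Address, Shifts[Level]) & Masks[Level]);
  }
}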

//
// Memory type to guard (matching the related PCD definition)
//
#define GUARD_HEAP_TYPE_PAGE  BIT2
#define GUARD_HEAP_TYPE_POOL  BIT3

//
// Debug message level
//
#define HEAP_GUARD_DEBUG_LEVEL  (DEBUG_POOL|DEBUG_PAGE)

typedef struct {
  UINT32                  TailMark;
  UINT32                  HeadMark;
  EFI_PHYSICAL_ADDRESS    Address;
  LIST_ENTRY              Link;
} HEAP_GUARD_NODE;
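
//
// Illustrative sketch (not part of the original header): HEAP_GUARD_NODE
// embeds a LIST_ENTRY, so nodes are kept on a doubly linked list and the
// containing record is recovered from a Link pointer with BASE_CR. The list
// head parameter and the function name below are hypothetical.
//
STATIC
HEAP_GUARD_NODE *
ExampleFirstGuardNode (
  IN LIST_ENTRY  *ListHead   // Hypothetical list of HEAP_GUARD_NODEs
  )
{
  if (IsListEmpty (ListHead)) {
    return NULL;
  }

  // Recover the HEAP_GUARD_NODE containing the first Link entry of the list.
  return BASE_CR (GetFirstNode (ListHead), HEAP_GUARD_NODE, Link);
}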

/**
  Set head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to set guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  );

/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  );

/**
  Adjust the base address and number of pages to actually allocate, according
  to the Guard settings.

  @param[in,out]  Memory          Base address of free memory.
  @param[in,out]  NumberOfPages   Size of memory to allocate.

  @return VOID.
**/
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  );

/**
  Adjust the start address and number of pages to free, according to the
  Guard settings.

  The purpose of this function is to keep a Guard page shared with an
  adjacent memory block if that block is still guarded, or to free it if it
  is no longer shared. Another purpose is to reserve pages as Guard pages
  when only part of a page range is freed.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS  *Memory,
  IN OUT UINTN                 *NumberOfPages
  );
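
//
// Illustrative sketch (not part of the original header): one plausible way
// the adjust and guard routines combine around a guarded page range. The
// function name is hypothetical and the ordering shown is only an
// illustration; the actual PiSmmCore allocation and free paths wire these
// calls into their own page bookkeeping and may sequence them differently.
//
STATIC
VOID
ExampleGuardAndUnguardRange (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_PHYSICAL_ADDRESS  Base;
  UINTN                 Pages;

  Base  = Memory;
  Pages = NumberOfPages;

  //
  // Allocation side: let AdjustMemoryA move the base and page count so that
  // head/tail Guard pages fit (or are shared with neighbours), then mark the
  // resulting range as guarded.
  //
  AdjustMemoryA (&Base, &Pages);
  SetGuardForMemory (Base, Pages);

  //
  // Free side: drop the Guards and let AdjustMemoryF decide which pages,
  // including shared Guard pages, can actually be returned to the free pool.
  //
  UnsetGuardForMemory (Base, Pages);
  AdjustMemoryF (&Base, &Pages);
}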

/**
  Check to see if the pool at the given address should be guarded or not.

  @param[in]  MemoryType  Pool type to check.

  @return TRUE  The given type of pool should be guarded.
  @return FALSE The given type of pool should not be guarded.
**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE  MemoryType
  );

/**
  Check to see if the page at the given address should be guarded or not.

  @param[in]  MemoryType    Page type to check.
  @param[in]  AllocateType  Allocation type to check.

  @return TRUE  The given type of page should be guarded.
  @return FALSE The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE   MemoryType,
  IN EFI_ALLOCATE_TYPE AllocateType
  );

/**
  Check to see if the page at the given address is guarded or not.

  @param[in]  Address  The address to check for.

  @return TRUE  The page at Address is guarded.
  @return FALSE The page at Address is not guarded.
**/
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS  Address
  );

/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]  Address  The address to check for.

  @return TRUE  The page at Address is a Guard page.
  @return FALSE The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS  Address
  );

/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  );

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  pool tail or pool head.

  @param[in]  Memory   Base address of memory allocated.
  @param[in]  NoPages  Number of pages actually allocated.
  @param[in]  Size     Size of memory requested.
                       (plus pool head/tail overhead)

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NoPages,
  IN UINTN                 Size
  );

/**
  Get the page base address according to pool head address.

  @param[in]  Memory  Head address of pool to free.

  @return Page base address of the pool.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS  Memory
  );
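
//
// Illustrative sketch (not part of the original header): the two pool-head
// helpers above pair up across allocation and free. The function name and
// parameters are hypothetical; the real pool code also allocates and frees
// the underlying guarded pages and maintains the pool headers.
//
STATIC
VOID *
ExamplePoolHeadRoundTrip (
  IN EFI_PHYSICAL_ADDRESS  PageBase,  // Base of the page run backing a guarded pool buffer
  IN UINTN                 NoPages,   // Number of pages actually allocated for it
  IN UINTN                 Size       // Requested size, including pool head/tail overhead
  )
{
  VOID  *PoolHead;

  // Place the pool head so that the buffer sits directly against a Guard
  // page, as described above.
  PoolHead = AdjustPoolHeadA (PageBase, NoPages, Size);

  // On the free path, map the pool head back to the page base that has to be
  // handed to the page-free code.
  return AdjustPoolHeadF ((EFI_PHYSICAL_ADDRESS)(UINTN)PoolHead);
}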

/**
  Helper function of memory allocation with Guard pages.

  @param  FreePageList   The free page node.
  @param  NumberOfPages  Number of pages to be allocated.
  @param  MaxAddress     Request to allocate memory below this address.
  @param  MemoryType     Type of memory requested.

  @return Memory address of allocated pages.
**/
UINTN
InternalAllocMaxAddressWithGuard (
  IN OUT LIST_ENTRY       *FreePageList,
  IN     UINTN            NumberOfPages,
  IN     UINTN            MaxAddress,
  IN     EFI_MEMORY_TYPE  MemoryType
  );

/**
  Helper function of memory free with Guard pages.

  @param[in]  Memory         Base address of memory being freed.
  @param[in]  NumberOfPages  The number of pages to free.
  @param[in]  AddRegion      If this memory is a newly added region.

  @retval EFI_NOT_FOUND          Could not find the entry that covers the range.
  @retval EFI_INVALID_PARAMETER  Address not aligned, Address is zero or
                                 NumberOfPages is zero.
  @retval EFI_SUCCESS            Pages successfully freed.
**/
EFI_STATUS
SmmInternalFreePagesExWithGuard (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages,
  IN BOOLEAN               AddRegion
  );

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @return TRUE  The heap guard is enabled.
  @return FALSE The heap guard is not enabled.
**/
BOOLEAN
IsHeapGuardEnabled (
  VOID
  );
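
//
// Illustrative sketch (not part of the original header): GUARD_HEAP_TYPE_PAGE
// and GUARD_HEAP_TYPE_POOL defined above mirror bits of the
// PcdHeapGuardPropertyMask PCD, so a minimal enablement check could look like
// the following. The function name is hypothetical and the real
// IsHeapGuardEnabled() in HeapGuard.c may apply additional conditions.
//
STATIC
BOOLEAN
ExampleIsAnyHeapGuardBitSet (
  VOID
  )
{
  return (BOOLEAN)((PcdGet8 (PcdHeapGuardPropertyMask) &
                    (GUARD_HEAP_TYPE_PAGE | GUARD_HEAP_TYPE_POOL)) != 0);
}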

/**
  Debug function used to verify if the Guard page is well set or not.

  @param[in]  BaseAddress    Address of memory to check.
  @param[in]  NumberOfPages  Size of memory in pages.

  @return TRUE  The head Guard and tail Guard are both well set.
  @return FALSE The head Guard and/or tail Guard are not well set.
**/
BOOLEAN
VerifyMemoryGuard (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN                 NumberOfPages
  );

extern BOOLEAN  mOnGuarding;

#endif