/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRR's to ensure that the cachability attributes
  for all memory regions is correct.

  The basic idea is to use 2MB page table entries where ever possible. If
  more granularity of cachability is required then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "DxeIpl.h"
#include "VirtualMemory.h"
//
// Global variable to keep track current available memory used as page table.
// All pools are linked into a circular list; this pointer always refers to
// the pool that allocations are currently served from (see
// InitializePageTablePool / AllocatePageTableMemory).
//
PAGE_TABLE_POOL   *mPageTablePool = NULL;
9189ec20 | 39 | /**\r |
382aeac2 | 40 | Clear legacy memory located at the first 4K-page, if available.\r |
9189ec20 | 41 | \r |
382aeac2 DB |
42 | This function traverses the whole HOB list to check if memory from 0 to 4095\r |
43 | exists and has not been allocated, and then clear it if so.\r | |
9189ec20 | 44 | \r |
382aeac2 | 45 | @param HobStart The start of HobList passed to DxeCore.\r |
9189ec20 JW |
46 | \r |
47 | **/\r | |
48 | VOID\r | |
49 | ClearFirst4KPage (\r | |
50 | IN VOID *HobStart\r | |
51 | )\r | |
52 | {\r | |
53 | EFI_PEI_HOB_POINTERS RscHob;\r | |
54 | EFI_PEI_HOB_POINTERS MemHob;\r | |
55 | BOOLEAN DoClear;\r | |
56 | \r | |
57 | RscHob.Raw = HobStart;\r | |
58 | MemHob.Raw = HobStart;\r | |
59 | DoClear = FALSE;\r | |
60 | \r | |
61 | //\r | |
62 | // Check if page 0 exists and free\r | |
63 | //\r | |
64 | while ((RscHob.Raw = GetNextHob (EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,\r | |
65 | RscHob.Raw)) != NULL) {\r | |
66 | if (RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY &&\r | |
67 | RscHob.ResourceDescriptor->PhysicalStart == 0) {\r | |
68 | DoClear = TRUE;\r | |
69 | //\r | |
70 | // Make sure memory at 0-4095 has not been allocated.\r | |
71 | //\r | |
72 | while ((MemHob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION,\r | |
73 | MemHob.Raw)) != NULL) {\r | |
74 | if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress\r | |
75 | < EFI_PAGE_SIZE) {\r | |
76 | DoClear = FALSE;\r | |
77 | break;\r | |
78 | }\r | |
79 | MemHob.Raw = GET_NEXT_HOB (MemHob);\r | |
80 | }\r | |
81 | break;\r | |
82 | }\r | |
83 | RscHob.Raw = GET_NEXT_HOB (RscHob);\r | |
84 | }\r | |
85 | \r | |
86 | if (DoClear) {\r | |
87 | DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));\r | |
88 | SetMem (NULL, EFI_PAGE_SIZE, 0);\r | |
89 | }\r | |
90 | \r | |
91 | return;\r | |
92 | }\r | |
93 | \r | |
382aeac2 DB |
94 | /**\r |
95 | Return configure status of NULL pointer detection feature.\r | |
96 | \r | |
97 | @return TRUE NULL pointer detection feature is enabled\r | |
98 | @return FALSE NULL pointer detection feature is disabled\r | |
99 | \r | |
100 | **/\r | |
9189ec20 JW |
101 | BOOLEAN\r |
102 | IsNullDetectionEnabled (\r | |
103 | VOID\r | |
104 | )\r | |
105 | {\r | |
106 | return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);\r | |
107 | }\r | |
5997daf7 | 108 | \r |
5630cdfe SZ |
109 | /**\r |
110 | Enable Execute Disable Bit.\r | |
111 | \r | |
112 | **/\r | |
113 | VOID\r | |
114 | EnableExecuteDisableBit (\r | |
115 | VOID\r | |
116 | )\r | |
117 | {\r | |
118 | UINT64 MsrRegisters;\r | |
119 | \r | |
120 | MsrRegisters = AsmReadMsr64 (0xC0000080);\r | |
121 | MsrRegisters |= BIT11;\r | |
122 | AsmWriteMsr64 (0xC0000080, MsrRegisters);\r | |
123 | }\r | |
124 | \r | |
50255363 JW |
125 | /**\r |
126 | The function will check if page table entry should be splitted to smaller\r | |
127 | granularity.\r | |
128 | \r | |
9db7e9fd JW |
129 | @param Address Physical memory address.\r |
130 | @param Size Size of the given physical memory.\r | |
131 | @param StackBase Base address of stack.\r | |
132 | @param StackSize Size of stack.\r | |
133 | \r | |
50255363 JW |
134 | @retval TRUE Page table should be split.\r |
135 | @retval FALSE Page table should not be split.\r | |
136 | **/\r | |
137 | BOOLEAN\r | |
138 | ToSplitPageTable (\r | |
139 | IN EFI_PHYSICAL_ADDRESS Address,\r | |
140 | IN UINTN Size,\r | |
141 | IN EFI_PHYSICAL_ADDRESS StackBase,\r | |
142 | IN UINTN StackSize\r | |
143 | )\r | |
144 | {\r | |
145 | if (IsNullDetectionEnabled () && Address == 0) {\r | |
146 | return TRUE;\r | |
147 | }\r | |
148 | \r | |
149 | if (PcdGetBool (PcdCpuStackGuard)) {\r | |
150 | if (StackBase >= Address && StackBase < (Address + Size)) {\r | |
151 | return TRUE;\r | |
152 | }\r | |
153 | }\r | |
154 | \r | |
155 | if (PcdGetBool (PcdSetNxForStack)) {\r | |
156 | if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {\r | |
157 | return TRUE;\r | |
158 | }\r | |
159 | }\r | |
160 | \r | |
161 | return FALSE;\r | |
162 | }\r | |
2ac1730b JW |
163 | /**\r |
164 | Initialize a buffer pool for page table use only.\r | |
165 | \r | |
166 | To reduce the potential split operation on page table, the pages reserved for\r | |
167 | page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and\r | |
168 | at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always\r | |
169 | initialized with number of pages greater than or equal to the given PoolPages.\r | |
170 | \r | |
171 | Once the pages in the pool are used up, this method should be called again to\r | |
172 | reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't\r | |
173 | happen in practice.\r | |
174 | \r | |
175 | @param PoolPages The least page number of the pool to be created.\r | |
176 | \r | |
177 | @retval TRUE The pool is initialized successfully.\r | |
178 | @retval FALSE The memory is out of resource.\r | |
179 | **/\r | |
180 | BOOLEAN\r | |
181 | InitializePageTablePool (\r | |
182 | IN UINTN PoolPages\r | |
183 | )\r | |
184 | {\r | |
185 | VOID *Buffer;\r | |
186 | \r | |
187 | //\r | |
188 | // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for\r | |
189 | // header.\r | |
190 | //\r | |
191 | PoolPages += 1; // Add one page for header.\r | |
192 | PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *\r | |
193 | PAGE_TABLE_POOL_UNIT_PAGES;\r | |
194 | Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);\r | |
195 | if (Buffer == NULL) {\r | |
196 | DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));\r | |
197 | return FALSE;\r | |
198 | }\r | |
199 | \r | |
200 | //\r | |
201 | // Link all pools into a list for easier track later.\r | |
202 | //\r | |
203 | if (mPageTablePool == NULL) {\r | |
204 | mPageTablePool = Buffer;\r | |
205 | mPageTablePool->NextPool = mPageTablePool;\r | |
206 | } else {\r | |
207 | ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;\r | |
208 | mPageTablePool->NextPool = Buffer;\r | |
209 | mPageTablePool = Buffer;\r | |
210 | }\r | |
211 | \r | |
212 | //\r | |
213 | // Reserve one page for pool header.\r | |
214 | //\r | |
215 | mPageTablePool->FreePages = PoolPages - 1;\r | |
216 | mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);\r | |
217 | \r | |
218 | return TRUE;\r | |
219 | }\r | |
220 | \r | |
221 | /**\r | |
222 | This API provides a way to allocate memory for page table.\r | |
223 | \r | |
224 | This API can be called more than once to allocate memory for page tables.\r | |
225 | \r | |
226 | Allocates the number of 4KB pages and returns a pointer to the allocated\r | |
227 | buffer. The buffer returned is aligned on a 4KB boundary.\r | |
228 | \r | |
229 | If Pages is 0, then NULL is returned.\r | |
230 | If there is not enough memory remaining to satisfy the request, then NULL is\r | |
231 | returned.\r | |
232 | \r | |
233 | @param Pages The number of 4 KB pages to allocate.\r | |
234 | \r | |
235 | @return A pointer to the allocated buffer or NULL if allocation fails.\r | |
236 | \r | |
237 | **/\r | |
238 | VOID *\r | |
239 | AllocatePageTableMemory (\r | |
240 | IN UINTN Pages\r | |
241 | )\r | |
242 | {\r | |
243 | VOID *Buffer;\r | |
244 | \r | |
245 | if (Pages == 0) {\r | |
246 | return NULL;\r | |
247 | }\r | |
248 | \r | |
249 | //\r | |
250 | // Renew the pool if necessary.\r | |
251 | //\r | |
252 | if (mPageTablePool == NULL ||\r | |
253 | Pages > mPageTablePool->FreePages) {\r | |
254 | if (!InitializePageTablePool (Pages)) {\r | |
255 | return NULL;\r | |
256 | }\r | |
257 | }\r | |
258 | \r | |
259 | Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;\r | |
260 | \r | |
261 | mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);\r | |
262 | mPageTablePool->FreePages -= Pages;\r | |
263 | \r | |
264 | return Buffer;\r | |
265 | }\r | |
266 | \r | |
5630cdfe SZ |
267 | /**\r |
268 | Split 2M page to 4K.\r | |
269 | \r | |
270 | @param[in] PhysicalAddress Start physical address the 2M page covered.\r | |
271 | @param[in, out] PageEntry2M Pointer to 2M page entry.\r | |
272 | @param[in] StackBase Stack base address.\r | |
273 | @param[in] StackSize Stack size.\r | |
274 | \r | |
275 | **/\r | |
276 | VOID\r | |
277 | Split2MPageTo4K (\r | |
278 | IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r | |
279 | IN OUT UINT64 *PageEntry2M,\r | |
280 | IN EFI_PHYSICAL_ADDRESS StackBase,\r | |
281 | IN UINTN StackSize\r | |
282 | )\r | |
283 | {\r | |
284 | EFI_PHYSICAL_ADDRESS PhysicalAddress4K;\r | |
285 | UINTN IndexOfPageTableEntries;\r | |
286 | PAGE_TABLE_4K_ENTRY *PageTableEntry;\r | |
5997daf7 LD |
287 | UINT64 AddressEncMask;\r |
288 | \r | |
289 | //\r | |
290 | // Make sure AddressEncMask is contained to smallest supported address field\r | |
291 | //\r | |
292 | AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r | |
5630cdfe | 293 | \r |
2ac1730b | 294 | PageTableEntry = AllocatePageTableMemory (1);\r |
36829e67 | 295 | ASSERT (PageTableEntry != NULL);\r |
5997daf7 | 296 | \r |
5630cdfe SZ |
297 | //\r |
298 | // Fill in 2M page entry.\r | |
299 | //\r | |
5997daf7 | 300 | *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r |
5630cdfe SZ |
301 | \r |
302 | PhysicalAddress4K = PhysicalAddress;\r | |
303 | for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {\r | |
304 | //\r | |
305 | // Fill in the Page Table entries\r | |
306 | //\r | |
5997daf7 | 307 | PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;\r |
5630cdfe | 308 | PageTableEntry->Bits.ReadWrite = 1;\r |
9189ec20 | 309 | \r |
50255363 JW |
310 | if ((IsNullDetectionEnabled () && PhysicalAddress4K == 0) ||\r |
311 | (PcdGetBool (PcdCpuStackGuard) && PhysicalAddress4K == StackBase)) {\r | |
9189ec20 JW |
312 | PageTableEntry->Bits.Present = 0;\r |
313 | } else {\r | |
314 | PageTableEntry->Bits.Present = 1;\r | |
315 | }\r | |
316 | \r | |
317 | if (PcdGetBool (PcdSetNxForStack)\r | |
318 | && (PhysicalAddress4K >= StackBase)\r | |
319 | && (PhysicalAddress4K < StackBase + StackSize)) {\r | |
5630cdfe SZ |
320 | //\r |
321 | // Set Nx bit for stack.\r | |
322 | //\r | |
323 | PageTableEntry->Bits.Nx = 1;\r | |
324 | }\r | |
325 | }\r | |
326 | }\r | |
327 | \r | |
328 | /**\r | |
329 | Split 1G page to 2M.\r | |
330 | \r | |
331 | @param[in] PhysicalAddress Start physical address the 1G page covered.\r | |
332 | @param[in, out] PageEntry1G Pointer to 1G page entry.\r | |
333 | @param[in] StackBase Stack base address.\r | |
334 | @param[in] StackSize Stack size.\r | |
335 | \r | |
336 | **/\r | |
337 | VOID\r | |
338 | Split1GPageTo2M (\r | |
339 | IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r | |
340 | IN OUT UINT64 *PageEntry1G,\r | |
341 | IN EFI_PHYSICAL_ADDRESS StackBase,\r | |
342 | IN UINTN StackSize\r | |
343 | )\r | |
344 | {\r | |
345 | EFI_PHYSICAL_ADDRESS PhysicalAddress2M;\r | |
346 | UINTN IndexOfPageDirectoryEntries;\r | |
347 | PAGE_TABLE_ENTRY *PageDirectoryEntry;\r | |
5997daf7 LD |
348 | UINT64 AddressEncMask;\r |
349 | \r | |
350 | //\r | |
351 | // Make sure AddressEncMask is contained to smallest supported address field\r | |
352 | //\r | |
353 | AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r | |
5630cdfe | 354 | \r |
2ac1730b | 355 | PageDirectoryEntry = AllocatePageTableMemory (1);\r |
36829e67 | 356 | ASSERT (PageDirectoryEntry != NULL);\r |
5997daf7 | 357 | \r |
5630cdfe SZ |
358 | //\r |
359 | // Fill in 1G page entry.\r | |
360 | //\r | |
5997daf7 | 361 | *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r |
5630cdfe SZ |
362 | \r |
363 | PhysicalAddress2M = PhysicalAddress;\r | |
364 | for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {\r | |
50255363 | 365 | if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize)) {\r |
5630cdfe | 366 | //\r |
9189ec20 | 367 | // Need to split this 2M page that covers NULL or stack range.\r |
5630cdfe SZ |
368 | //\r |
369 | Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r | |
370 | } else {\r | |
371 | //\r | |
372 | // Fill in the Page Directory entries\r | |
373 | //\r | |
5997daf7 | 374 | PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;\r |
5630cdfe SZ |
375 | PageDirectoryEntry->Bits.ReadWrite = 1;\r |
376 | PageDirectoryEntry->Bits.Present = 1;\r | |
377 | PageDirectoryEntry->Bits.MustBe1 = 1;\r | |
378 | }\r | |
379 | }\r | |
380 | }\r | |
381 | \r | |
/**
  Set one page of page table pool memory to be read-only.

  Walks the paging hierarchy from PageTableBase down to the entry covering
  Address (rounded down to the pool alignment boundary) and clears the R/W
  bit across one whole PAGE_TABLE_POOL_UNIT_SIZE of mappings, splitting
  large pages on the way down when needed.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                             PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS              Address,
  IN  BOOLEAN                           Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  //
  // Per-level constants, indexed by paging level (1 = PTE .. 4 = PML4E).
  //
  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask  = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                    PAGING_1G_ADDRESS_MASK_64;
  PageTable       = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize    = PAGE_TABLE_POOL_UNIT_SIZE;

  //
  // Walk down the hierarchy; each iteration either descends one level,
  // clears R/W for the whole pool unit at this level, or splits a large
  // page to get finer granularity.
  //
  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
          // one page (2MB). Then we don't need to update attributes for pages
          // crossing page directory. ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize    -= LevelSize[Level];

          ++Index;
        }
      }

      //
      // The whole pool unit has been handled at this level; done.
      //
      break;

    } else {
      //
      // The smaller granularity of page must be needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      //
      // Expand the large page into 512 entries of the next-lower size,
      // preserving the identity mapping (and PS for 2M sub-pages).
      //
      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
            EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
            ++EntryIndex) {
        NewPageTable[EntryIndex] = PhysicalAddress  | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }
        PhysicalAddress += LevelSize[Level - 1];
      }

      //
      // Replace the large-page entry with a reference to the new table,
      // then continue the walk inside it.
      //
      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                                        IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}
503 | \r | |
504 | /**\r | |
505 | Prevent the memory pages used for page table from been overwritten.\r | |
506 | \r | |
507 | @param[in] PageTableBase Base address of page table (CR3).\r | |
508 | @param[in] Level4Paging Level 4 paging flag.\r | |
509 | \r | |
510 | **/\r | |
511 | VOID\r | |
512 | EnablePageTableProtection (\r | |
513 | IN UINTN PageTableBase,\r | |
514 | IN BOOLEAN Level4Paging\r | |
515 | )\r | |
516 | {\r | |
517 | PAGE_TABLE_POOL *HeadPool;\r | |
518 | PAGE_TABLE_POOL *Pool;\r | |
519 | UINT64 PoolSize;\r | |
520 | EFI_PHYSICAL_ADDRESS Address;\r | |
521 | \r | |
522 | if (mPageTablePool == NULL) {\r | |
523 | return;\r | |
524 | }\r | |
525 | \r | |
526 | //\r | |
527 | // Disable write protection, because we need to mark page table to be write\r | |
528 | // protected.\r | |
529 | //\r | |
530 | AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);\r | |
531 | \r | |
532 | //\r | |
533 | // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to\r | |
534 | // remember original one in advance.\r | |
535 | //\r | |
536 | HeadPool = mPageTablePool;\r | |
537 | Pool = HeadPool;\r | |
538 | do {\r | |
539 | Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;\r | |
540 | PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);\r | |
541 | \r | |
542 | //\r | |
543 | // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE, which\r | |
544 | // is one of page size of the processor (2MB by default). Let's apply the\r | |
545 | // protection to them one by one.\r | |
546 | //\r | |
547 | while (PoolSize > 0) {\r | |
548 | SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);\r | |
549 | Address += PAGE_TABLE_POOL_UNIT_SIZE;\r | |
550 | PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;\r | |
551 | }\r | |
552 | \r | |
553 | Pool = Pool->NextPool;\r | |
554 | } while (Pool != HeadPool);\r | |
555 | \r | |
556 | //\r | |
557 | // Enable write protection, after page table attribute updated.\r | |
558 | //\r | |
559 | AsmWriteCr0 (AsmReadCr0() | CR0_WP);\r | |
560 | }\r | |
561 | \r | |
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of 4 level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  //
  // Detect 1GB page support: CPUID leaf 0x80000001, EDX bit 26. Only probed
  // when PcdUse1GPageTable allows it.
  //
  Page1GSupport = FALSE;
  if (PcdGetBool(PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported: prefer the CPU HOB, otherwise
  // CPUID leaf 0x80000008 (EAX[7:0]), defaulting to 36 bits.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  if (PhysicalAddressBits <= 39 ) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // Pre-allocate big pages to avoid later allocations.
  // With 1GB pages only the PML4 page and one PDP page per PML4 entry are
  // needed; otherwise each PDP entry also needs a page directory page.
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
  BigPageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap         = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress        = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entires.
    // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      //
      // With 1GB pages, the PDP page is filled with 1G leaf entries directly.
      //
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize)) {
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entries points to a page of Page Directory entires.
        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entries
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize)) {
            //
            // Need to split this 2M page that covers NULL or stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      //
      // Zero out the remaining PDP entries of the last PDP page.
      //
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, TRUE);

  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
764 | \r |