/** @file
  x64-specific functionality for Page Table Setup.

Copyright (c) 2006 - 2020, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/

#include <Uefi/UefiBaseType.h>
#include <Uefi/UefiSpec.h>
#include <Pi/PiBootMode.h>
#include <Pi/PiHob.h>
#include <Library/DebugLib.h>
#include <Library/BaseLib.h>
#include <Library/HobLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/PcdLib.h>
#include <Guid/MemoryTypeInformation.h>
#include <Guid/MemoryAllocationHob.h>
#include <Register/Intel/Cpuid.h>
#include <Library/PlatformInitLib.h>
#include "PageTables.h"

UINTN  mLevelShift[5] = {
  0,
  PAGING_L1_ADDRESS_SHIFT,
  PAGING_L2_ADDRESS_SHIFT,
  PAGING_L3_ADDRESS_SHIFT,
  PAGING_L4_ADDRESS_SHIFT
};

UINT64  mLevelMask[5] = {
  0,
  PAGING_4K_ADDRESS_MASK_64,
  PAGING_2M_ADDRESS_MASK_64,
  PAGING_1G_ADDRESS_MASK_64,
  PAGING_1G_ADDRESS_MASK_64
};

UINT64  mLevelSize[5] = {
  0,
  SIZE_4KB,
  SIZE_2MB,
  SIZE_1GB,
  SIZE_512GB
};
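
//
// Note: mLevelShift/mLevelMask/mLevelSize are indexed by paging level:
// index 1 describes a PTE (4KB page), 2 a PDE (2MB page), 3 a PDPTE (1GB
// page) and 4 a PML4E (512GB region). mLevelMask[4] is effectively a
// placeholder: SetPageTablePoolReadOnly () consults mLevelMask only for
// leaf (PS=1) entries, which cannot occur at level 4.
//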

BOOLEAN
IsSetNxForStack (
  VOID
  )
{
  EFI_HOB_GUID_TYPE      *GuidHob;
  EFI_HOB_PLATFORM_INFO  *PlatformInfo;

  GuidHob = GetFirstGuidHob (&gUefiOvmfPkgPlatformInfoGuid);
  if (GuidHob == NULL) {
    ASSERT (FALSE);
    return FALSE;
  }

  PlatformInfo = (EFI_HOB_PLATFORM_INFO *)GET_GUID_HOB_DATA (GuidHob);

  return PlatformInfo->PcdSetNxForStack;
}

/**
  Clear legacy memory located at the first 4K-page, if available.

  This function traverses the whole HOB list to check if memory from 0 to 4095
  exists and has not been allocated, and then clears it if so.

  @param HobStart The start of HobList passed to DxeCore.

**/
VOID
ClearFirst4KPage (
  IN VOID  *HobStart
  )
{
  EFI_PEI_HOB_POINTERS  RscHob;
  EFI_PEI_HOB_POINTERS  MemHob;
  BOOLEAN               DoClear;

  RscHob.Raw = HobStart;
  MemHob.Raw = HobStart;
  DoClear    = FALSE;

  //
  // Check if page 0 exists and is free
  //
  while ((RscHob.Raw = GetNextHob (
                         EFI_HOB_TYPE_RESOURCE_DESCRIPTOR,
                         RscHob.Raw
                         )) != NULL)
  {
    if ((RscHob.ResourceDescriptor->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) &&
        (RscHob.ResourceDescriptor->PhysicalStart == 0))
    {
      DoClear = TRUE;
      //
      // Make sure memory at 0-4095 has not been allocated.
      //
      while ((MemHob.Raw = GetNextHob (
                             EFI_HOB_TYPE_MEMORY_ALLOCATION,
                             MemHob.Raw
                             )) != NULL)
      {
        if (MemHob.MemoryAllocation->AllocDescriptor.MemoryBaseAddress
            < EFI_PAGE_SIZE)
        {
          DoClear = FALSE;
          break;
        }

        MemHob.Raw = GET_NEXT_HOB (MemHob);
      }

      break;
    }

    RscHob.Raw = GET_NEXT_HOB (RscHob);
  }

  if (DoClear) {
    DEBUG ((DEBUG_INFO, "Clearing first 4K-page!\r\n"));
    SetMem (NULL, EFI_PAGE_SIZE, 0);
  }

  return;
}

/**
  Return the configuration status of the NULL pointer detection feature.

  @return TRUE  NULL pointer detection feature is enabled
  @return FALSE NULL pointer detection feature is disabled

**/
BOOLEAN
IsNullDetectionEnabled (
  VOID
  )
{
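  //
  // Only BIT0 (enable NULL pointer detection) of
  // PcdNullPointerDetectionPropertyMask is consulted here; when set, page 0
  // is later mapped non-present (see Split2MPageTo4K below).
  //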
  return ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT0) != 0);
}

/**
  The function will check if Execute Disable Bit is available.

  @retval TRUE  Execute Disable Bit is available.
  @retval FALSE Execute Disable Bit is not available.

**/
BOOLEAN
IsExecuteDisableBitAvailable (
  VOID
  )
{
  UINT32   RegEax;
  UINT32   RegEdx;
  BOOLEAN  Available;

  Available = FALSE;
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT20) != 0) {
      //
      // Bit 20: Execute Disable Bit available.
      //
      Available = TRUE;
    }
  }

  return Available;
}

/**
  Check if Execute Disable Bit (IA32_EFER.NXE) should be enabled or not.

  @retval TRUE  IA32_EFER.NXE should be enabled.
  @retval FALSE IA32_EFER.NXE should not be enabled.

**/
BOOLEAN
IsEnableNonExecNeeded (
  VOID
  )
{
  if (!IsExecuteDisableBitAvailable ()) {
    return FALSE;
  }

  //
  // The XD flag (BIT63) in a page table entry is only valid if IA32_EFER.NXE
  // is set. Features controlled by the following PCDs need this feature to be
  // enabled.
  //
  return (IsSetNxForStack () ||
          FixedPcdGet64 (PcdDxeNxMemoryProtectionPolicy) != 0 ||
          PcdGet32 (PcdImageProtectionPolicy) != 0);
}

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64  MsrRegisters;

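  //
  // MSR 0xC0000080 is IA32_EFER; BIT11 is its NXE (No-Execute Enable) bit.
  //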
  MsrRegisters  = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  The function will check if the page table entry should be split to smaller
  granularity.

  @param Address   Physical memory address.
  @param Size      Size of the given physical memory.
  @param StackBase Base address of stack.
  @param StackSize Size of stack.

  @retval TRUE  Page table should be split.
  @retval FALSE Page table should not be split.
**/
BOOLEAN
ToSplitPageTable (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN UINTN                 Size,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize
  )
{
  if (IsNullDetectionEnabled () && (Address == 0)) {
    return TRUE;
  }

  if (FixedPcdGetBool (PcdCpuStackGuard)) {
    if ((StackBase >= Address) && (StackBase < (Address + Size))) {
      return TRUE;
    }
  }

  if (IsSetNxForStack ()) {
    if ((Address < StackBase + StackSize) && ((Address + Size) > StackBase)) {
      return TRUE;
    }
  }

  return FALSE;
}
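
//
// In short: a large page must be split only when a 4KB-granular attribute has
// to land inside it - a non-present NULL guard page, a non-present stack
// guard page, or the NX bit on the stack range.
//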

/**
  Initialize a buffer pool for page table use only.

  To reduce the potential split operation on page table, the pages reserved for
  page table should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
  at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
  initialized with a number of pages greater than or equal to the given
  PoolPages.

  Once the pages in the pool are used up, this method should be called again to
  reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. But usually this won't
  happen in practice.

  @param[in]      PoolPages     The minimum number of pages for the pool to be
                                created.
  @param[in, out] PageTablePool Pointer to a pointer to the current available
                                memory used as page table.

  @retval TRUE  The pool is initialized successfully.
  @retval FALSE The memory is out of resource.
**/
BOOLEAN
InitializePageTablePool (
  IN     UINTN            PoolPages,
  IN OUT PAGE_TABLE_POOL  **PageTablePool
  )
{
  VOID  *Buffer;

  DEBUG ((DEBUG_INFO, "InitializePageTablePool PoolPages=%d\n", PoolPages));

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // the header.
  //
  PoolPages += 1;   // Add one page for the header.
  PoolPages  = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
               PAGE_TABLE_POOL_UNIT_PAGES;
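  //
  // Worked example (assuming PAGE_TABLE_POOL_UNIT_PAGES is 512, i.e. 2MB):
  // a request for 5 pages becomes 6 with the header page and is rounded up to
  // ((6 - 1) / 512 + 1) * 512 = 512 pages.
  //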
  Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier tracking later.
  //
  if (*PageTablePool == NULL) {
    *(UINT64 *)(UINTN)PageTablePool = (UINT64)(UINTN)Buffer;
    (*PageTablePool)->NextPool      = *PageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = (*PageTablePool)->NextPool;
    (*PageTablePool)->NextPool            = Buffer;
    *PageTablePool                        = Buffer;
  }
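  //
  // The pools form a circular singly linked list: *PageTablePool always
  // refers to the most recently created pool, and following NextPool
  // eventually wraps around to it again (see EnablePageTableProtection
  // below).
  //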

  //
  // Reserve one page for the pool header.
  //
  (*PageTablePool)->FreePages = PoolPages - 1;
  (*PageTablePool)->Offset    = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

/**
  This API provides a way to allocate memory for page tables.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages and returns a pointer to the allocated
  buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param[in]      Pages         The number of 4 KB pages to allocate.
  @param[in, out] PageTablePool Pointer to a pointer to the current available
                                memory used as page table.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN     UINTN            Pages,
  IN OUT PAGE_TABLE_POOL  **PageTablePool
  )
{
  VOID  *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  DEBUG ((DEBUG_INFO, "AllocatePageTableMemory. PageTablePool=%p, Pages=%d\n", *PageTablePool, Pages));
  //
  // Renew the pool if necessary.
  //
  if ((*PageTablePool == NULL) ||
      (Pages > (*PageTablePool)->FreePages))
  {
    if (!InitializePageTablePool (Pages, PageTablePool)) {
      return NULL;
    }
  }

  Buffer = (UINT8 *)(*PageTablePool) + (*PageTablePool)->Offset;

  (*PageTablePool)->Offset    += EFI_PAGES_TO_SIZE (Pages);
  (*PageTablePool)->FreePages -= Pages;

  DEBUG ((
    DEBUG_INFO,
    "%a:%a: Buffer=0x%Lx Pages=%ld, PageTablePool=%p\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Buffer,
    Pages,
    *PageTablePool
    ));

  return Buffer;
}
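
//
// Usage sketch: callers seed a NULL pool pointer once and then pass its
// address on every allocation, e.g.
//
//   PAGE_TABLE_POOL  *Pool = NULL;
//   VOID             *Table;
//
//   Table = AllocatePageTableMemory (1, &Pool);
//
// as CreateIdentityMappingPageTables () does below.
//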

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress Start physical address the 2M page covered.
  @param[in, out] PageEntry2M     Pointer to 2M page entry.
  @param[in]      StackBase       Stack base address.
  @param[in]      StackSize       Stack size.
  @param[in, out] PageTablePool   Pointer to the current available memory used
                                  as page table.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64            *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN OUT PAGE_TABLE_POOL   *PageTablePool
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress4K;
  UINTN                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY   *PageTableEntry;

  DEBUG ((DEBUG_INFO, "Split2MPageTo4K\n"));

  PageTableEntry = AllocatePageTableMemory (1, &PageTablePool);

  if (PageTableEntry == NULL) {
    ASSERT (FALSE);
    return;
  }

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64)(UINTN)PageTableEntry | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64         = (UINT64)PhysicalAddress4K;
    PageTableEntry->Bits.ReadWrite = 1;

    if ((IsNullDetectionEnabled () && (PhysicalAddress4K == 0)) ||
        (FixedPcdGetBool (PcdCpuStackGuard) && (PhysicalAddress4K == StackBase)))
    {
      PageTableEntry->Bits.Present = 0;
    } else {
      PageTableEntry->Bits.Present = 1;
    }

    if (  IsSetNxForStack ()
       && (PhysicalAddress4K >= StackBase)
       && (PhysicalAddress4K < StackBase + StackSize))
    {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress Start physical address the 1G page covered.
  @param[in, out] PageEntry1G     Pointer to 1G page entry.
  @param[in]      StackBase       Stack base address.
  @param[in]      StackSize       Stack size.
  @param[in, out] PageTablePool   Pointer to the current available memory used
                                  as page table.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64            *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize,
  IN OUT PAGE_TABLE_POOL   *PageTablePool
  )
{
  EFI_PHYSICAL_ADDRESS  PhysicalAddress2M;
  UINTN                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY      *PageDirectoryEntry;

  DEBUG ((DEBUG_INFO, "Split1GPageTo2M\n"));
  PageDirectoryEntry = AllocatePageTableMemory (1, &PageTablePool);

  if (PageDirectoryEntry == NULL) {
    ASSERT (FALSE);
    return;
  }

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64)(UINTN)PageDirectoryEntry | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if (ToSplitPageTable (PhysicalAddress2M, SIZE_2MB, StackBase, StackSize)) {
      //
      // Need to split this 2M page that covers the NULL or stack range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, PageTablePool);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64         = (UINT64)PhysicalAddress2M;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present   = 1;
      PageDirectoryEntry->Bits.MustBe1   = 1;
    }
  }
}

/**
  Set one page of page table pool memory to be read-only.

  @param[in]      PageTableBase Base address of page table (CR3).
  @param[in]      Address       Start address of a page to be set as read-only.
  @param[in]      Level4Paging  Level 4 paging flag.
  @param[in, out] PageTablePool Pointer to the current available memory used as
                                page table.

**/
VOID
SetPageTablePoolReadOnly (
  IN UINTN                 PageTableBase,
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN BOOLEAN               Level4Paging,
  IN OUT PAGE_TABLE_POOL   *PageTablePool
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINTN                 Level;
  UINT64                PoolUnitSize;

  if (PageTableBase == 0) {
    ASSERT (FALSE);
    return;
  }

  //
  // Since the page table is always from the page table pool, which is always
  // located at the boundary of PAGE_TABLE_POOL_ALIGNMENT, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;
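  //
  // The mask rounds Address down to the pool alignment boundary; the walk
  // below then descends the paging hierarchy until it reaches the entries
  // mapping that pool unit.
  //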

  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, mLevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to the next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= mLevelSize[Level]) {
      //
      // Clear the R/W bit if current page granularity is not larger than pool
      // unit size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit within
          // one page (2MB), so we don't need to update attributes for pages
          // crossing a page directory. The ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= mLevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // A smaller page granularity is needed.
      //
      ASSERT (Level > 1);

      DEBUG ((DEBUG_INFO, "SetPageTablePoolReadOnly\n"));
      NewPageTable = AllocatePageTableMemory (1, &PageTablePool);

      if (NewPageTable == NULL) {
        ASSERT (FALSE);
        return;
      }

      PhysicalAddress = PageAttr & mLevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += mLevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}

/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in]      PageTableBase Base address of page table (CR3).
  @param[in]      Level4Paging  Level 4 paging flag.
  @param[in, out] PageTablePool Pointer to the current available memory used as
                                page table.

**/
VOID
EnablePageTableProtection (
  IN UINTN                PageTableBase,
  IN BOOLEAN              Level4Paging,
  IN OUT PAGE_TABLE_POOL  *PageTablePool
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  DEBUG ((DEBUG_INFO, "EnablePageTableProtection\n"));

  if (PageTablePool == NULL) {
    return;
  }

  //
  // Disable write protection, because we need to mark the page table as
  // read-only.
  //
  AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

  //
  // SetPageTablePoolReadOnly might update PageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = PageTablePool;
  Pool     = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of the page sizes supported by the processor (2MB by
    // default). Let's apply the protection to the units one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging, PageTablePool);
      Address  += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);

  //
  // Re-enable write protection after the page table attributes are updated.
  //
  AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase Stack base address.
  @param[in] StackSize Stack size.

  @return The address of 4 level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS  StackBase,
  IN UINTN                 StackSize
  )
{
  UINT32                          RegEax;
  UINT32                          RegEdx;
  UINT8                           PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS            PageAddress;
  UINTN                           IndexOfPml5Entries;
  UINTN                           IndexOfPml4Entries;
  UINTN                           IndexOfPdpEntries;
  UINTN                           IndexOfPageDirectoryEntries;
  UINT32                          NumberOfPml5EntriesNeeded;
  UINT32                          NumberOfPml4EntriesNeeded;
  UINT32                          NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel5Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                *PageDirectoryEntry;
  UINTN                           TotalPagesNum;
  UINTN                           BigPageAddress;
  VOID                            *Hob;
  BOOLEAN                         Page5LevelSupport;
  BOOLEAN                         Page1GSupport;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  IA32_CR4                        Cr4;
  PAGE_TABLE_POOL                 *PageTablePool;

  //
  // Set PageMapLevel5Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel5Entry = NULL;

  Page1GSupport = FALSE;
  if (FixedPcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob == NULL) {
    ASSERT (FALSE);
    return 0;
  }

  PhysicalAddressBits = ((EFI_HOB_CPU *)Hob)->SizeOfMemorySpace;

  //
  // The CPU will already have LA57 enabled, so just check CR4.
  //
  Cr4.UintN         = AsmReadCr4 ();
  Page5LevelSupport = (Cr4.Bits.LA57 ? TRUE : FALSE);

  DEBUG ((
    DEBUG_INFO,
    "AddressBits=%u 5LevelPaging=%u 1GPage=%u \n",
    PhysicalAddressBits,
    Page5LevelSupport,
    Page1GSupport
    ));

  //
  // Calculate the table entries needed.
  //
  NumberOfPml5EntriesNeeded = 1;
  if (PhysicalAddressBits > 48) {
    NumberOfPml5EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 48);
    PhysicalAddressBits       = 48;
  }

  NumberOfPml4EntriesNeeded = 1;
  if (PhysicalAddressBits > 39) {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 39);
    PhysicalAddressBits       = 39;
  }

  NumberOfPdpEntriesNeeded = 1;
  ASSERT (PhysicalAddressBits > 30);
  NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, PhysicalAddressBits - 30);

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = ((NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  } else {
    TotalPagesNum = (NumberOfPml4EntriesNeeded + 1) * NumberOfPml5EntriesNeeded + 1;
  }

  //
  // Subtract the one page occupied by PML5 entries if 5-Level Paging is
  // disabled.
  //
  if (!Page5LevelSupport) {
    TotalPagesNum--;
  }

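  //
  // Worked example: with 48 physical address bits and 1GB pages supported,
  // NumberOfPml5EntriesNeeded = 1 and NumberOfPml4EntriesNeeded = 512, so
  // TotalPagesNum = (512 + 1) * 1 + 1 = 514 pages (513 without 5-level
  // paging), i.e. slightly over 2MB of page table storage.
  //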
  DEBUG ((
    DEBUG_INFO,
    "Pml5=%u Pml4=%u Pdp=%u TotalPage=%Lu\n",
    NumberOfPml5EntriesNeeded,
    NumberOfPml4EntriesNeeded,
    NumberOfPdpEntriesNeeded,
    (UINT64)TotalPagesNum
    ));

  PageTablePool  = NULL;
  BigPageAddress = (UINTN)AllocatePageTableMemory (TotalPagesNum, &PageTablePool);
  if (BigPageAddress == 0) {
    ASSERT (FALSE);
    return 0;
  }

  DEBUG ((DEBUG_INFO, "BigPageAddress = 0x%llx, PageTablePool=%p\n", BigPageAddress, PageTablePool));

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *)BigPageAddress;
  if (Page5LevelSupport) {
    //
    // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
    //
    PageMapLevel5Entry = PageMap;
    BigPageAddress    += SIZE_4KB;
  }

  PageAddress = 0;

  for ( IndexOfPml5Entries = 0
        ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
        ; IndexOfPml5Entries++)
  {
    //
    // Each PML5 entry points to a page of PML4 entries.
    // So let's allocate space for them and fill them in the IndexOfPml4Entries loop.
    // When 5-Level Paging is disabled, the allocation below happens only once.
    //
    PageMapLevel4Entry = (VOID *)BigPageAddress;
    BigPageAddress    += SIZE_4KB;

    if (Page5LevelSupport) {
      //
      // Make a PML5 Entry
      //
      PageMapLevel5Entry->Uint64         = (UINT64)(UINTN)PageMapLevel4Entry;
      PageMapLevel5Entry->Bits.ReadWrite = 1;
      PageMapLevel5Entry->Bits.Present   = 1;
      PageMapLevel5Entry++;
    }

    for ( IndexOfPml4Entries = 0
          ; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512)
          ; IndexOfPml4Entries++, PageMapLevel4Entry++)
    {
      //
      // Each PML4 entry points to a page of Page Directory Pointer entries.
      // So let's allocate space for them and fill them in the IndexOfPdpEntries loop.
      //
      PageDirectoryPointerEntry = (VOID *)BigPageAddress;
      BigPageAddress           += SIZE_4KB;

      //
      // Make a PML4 Entry
      //
      PageMapLevel4Entry->Uint64         = (UINT64)(UINTN)PageDirectoryPointerEntry;
      PageMapLevel4Entry->Bits.ReadWrite = 1;
      PageMapLevel4Entry->Bits.Present   = 1;

      if (Page1GSupport) {
        PageDirectory1GEntry = (VOID *)PageDirectoryPointerEntry;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
          if (ToSplitPageTable (PageAddress, SIZE_1GB, StackBase, StackSize)) {
            Split1GPageTo2M (
              PageAddress,
              (UINT64 *)PageDirectory1GEntry,
              StackBase,
              StackSize,
              PageTablePool
              );
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectory1GEntry->Uint64         = (UINT64)PageAddress;
            PageDirectory1GEntry->Bits.ReadWrite = 1;
            PageDirectory1GEntry->Bits.Present   = 1;
            PageDirectory1GEntry->Bits.MustBe1   = 1;
          }
        }
      } else {
        for ( IndexOfPdpEntries = 0
              ; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512)
              ; IndexOfPdpEntries++, PageDirectoryPointerEntry++)
        {
          //
          // Each Page Directory Pointer entry points to a page of Page Directory entries.
          // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
          //
          PageDirectoryEntry = (VOID *)BigPageAddress;
          BigPageAddress    += SIZE_4KB;

          //
          // Fill in a Page Directory Pointer Entry
          //
          PageDirectoryPointerEntry->Uint64         = (UINT64)(UINTN)PageDirectoryEntry;
          PageDirectoryPointerEntry->Bits.ReadWrite = 1;
          PageDirectoryPointerEntry->Bits.Present   = 1;

          for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
            if (ToSplitPageTable (PageAddress, SIZE_2MB, StackBase, StackSize)) {
              //
              // Need to split this 2M page that covers the NULL or stack range.
              //
              Split2MPageTo4K (PageAddress, (UINT64 *)PageDirectoryEntry, StackBase, StackSize, PageTablePool);
            } else {
              //
              // Fill in the Page Directory entries
              //
              PageDirectoryEntry->Uint64         = (UINT64)PageAddress;
              PageDirectoryEntry->Bits.ReadWrite = 1;
              PageDirectoryEntry->Bits.Present   = 1;
              PageDirectoryEntry->Bits.MustBe1   = 1;
            }
          }
        }

        //
        // Fill with null entries for the unused PDPTEs.
        //
        ZeroMem (PageDirectoryPointerEntry, (512 - IndexOfPdpEntries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
      }
    }

    //
    // For the PML4 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel4Entry, (512 - IndexOfPml4Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  if (Page5LevelSupport) {
    //
    // For the PML5 entries we are not using, fill in a null entry.
    //
    ZeroMem (PageMapLevel5Entry, (512 - IndexOfPml5Entries) * sizeof (PAGE_MAP_AND_DIRECTORY_POINTER));
  }

  //
  // Protect the page table by marking the memory used for the page table to
  // be read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, TRUE, PageTablePool);

  return (UINTN)PageMap;
}