git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/CpuDxe/CpuPageTable.c
1 /** @file
2 Page table management support.
3
4 Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include <Base.h>
18 #include <Uefi.h>
19 #include <Library/BaseLib.h>
20 #include <Library/CpuLib.h>
21 #include <Library/BaseMemoryLib.h>
22 #include <Library/MemoryAllocationLib.h>
23 #include <Library/DebugLib.h>
24 #include <Library/UefiBootServicesTableLib.h>
25 #include <Protocol/MpService.h>
26 #include <Protocol/SmmBase2.h>
27 #include <Register/Cpuid.h>
28 #include <Register/Msr.h>
29
30 #include "CpuDxe.h"
31 #include "CpuPageTable.h"
32
33 ///
34 /// Paging registers
35 ///
36 #define CR0_WP BIT16
37 #define CR0_PG BIT31
38 #define CR4_PSE BIT4
39 #define CR4_PAE BIT5
40
41 ///
42 /// Page Table Entry
43 ///
44 #define IA32_PG_P BIT0
45 #define IA32_PG_RW BIT1
46 #define IA32_PG_U BIT2
47 #define IA32_PG_WT BIT3
48 #define IA32_PG_CD BIT4
49 #define IA32_PG_A BIT5
50 #define IA32_PG_D BIT6
51 #define IA32_PG_PS BIT7
52 #define IA32_PG_PAT_2M BIT12
53 #define IA32_PG_PAT_4K IA32_PG_PS
54 #define IA32_PG_PMNT BIT62
55 #define IA32_PG_NX BIT63
56
57 #define PAGE_ATTRIBUTE_BITS (IA32_PG_D | IA32_PG_A | IA32_PG_U | IA32_PG_RW | IA32_PG_P)
58 //
59 // Bits 1, 2, 5, 6 are reserved in the IA32 PAE PDPTE
60 // X64 PAE PDPTE does not have such restriction
61 //
62 #define IA32_PAE_PDPTE_ATTRIBUTE_BITS (IA32_PG_P)
63
64 #define PAGE_PROGATE_BITS (IA32_PG_NX | PAGE_ATTRIBUTE_BITS)
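//
// PAGE_PROGATE_BITS are the attribute bits copied from a large-page entry into
// each of the new child entries when SplitPage() breaks it into smaller pages.
//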
65
66 #define PAGING_4K_MASK 0xFFF
67 #define PAGING_2M_MASK 0x1FFFFF
68 #define PAGING_1G_MASK 0x3FFFFFFF
69
70 #define PAGING_PAE_INDEX_MASK 0x1FF
71
72 #define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
73 #define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
74 #define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
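//
// The address masks above keep bits 51:12 (4K), 51:21 (2M) and 51:30 (1G) of a
// page table entry, i.e. the physical page frame, and drop the attribute bits.
//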
75
76 typedef enum {
77 PageNone,
78 Page4K,
79 Page2M,
80 Page1G,
81 } PAGE_ATTRIBUTE;
82
83 typedef struct {
84 PAGE_ATTRIBUTE Attribute;
85 UINT64 Length;
86 UINT64 AddressMask;
87 } PAGE_ATTRIBUTE_TABLE;
88
89 typedef enum {
90 PageActionAssign,
91 PageActionSet,
92 PageActionClear,
93 } PAGE_ACTION;
94
95 PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
96 {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64},
97 {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64},
98 {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64},
99 };
100
101 PAGE_TABLE_POOL *mPageTablePool = NULL;
102 PAGE_TABLE_LIB_PAGING_CONTEXT mPagingContext;
103 EFI_SMM_BASE2_PROTOCOL *mSmmBase2 = NULL;
104
105 /**
106 Check if current execution environment is in SMM mode or not, via
107 EFI_SMM_BASE2_PROTOCOL.
108
109 This is necessary because MdePkg\Library\SmmMemoryAllocationLib supports
110 freeing memory outside SMRAM. If certain memory features (like Heap Guard)
111 are enabled, the library will call gBS->FreePool() or gBS->FreePages(), which
112 in turn call the SetMemorySpaceAttributes() interface to change memory paging
113 attributes during the free operation.
114
115 This means that SetMemorySpaceAttributes() may run in SMM mode, which would
116 produce incorrect results because SMM mode always loads its own page tables,
117 usually different from the DXE ones. This function can be used to detect such
118 a situation and avoid further misoperation.
119
120 @retval TRUE In SMM mode.
121 @retval FALSE Not in SMM mode.
122 **/
123 BOOLEAN
124 IsInSmm (
125 VOID
126 )
127 {
128 BOOLEAN InSmm;
129
130 InSmm = FALSE;
131 if (mSmmBase2 == NULL) {
132 gBS->LocateProtocol (&gEfiSmmBase2ProtocolGuid, NULL, (VOID **)&mSmmBase2);
133 }
134
135 if (mSmmBase2 != NULL) {
136 mSmmBase2->InSmm (mSmmBase2, &InSmm);
137 }
138
139 //
140 // mSmmBase2->InSmm() can only detect if the caller is running in SMRAM
141 // or from SMM driver. It cannot tell if the caller is running in SMM mode.
142 // Check the page table base address to confirm it, because SMM mode will
143 // load its own page table.
144 //
145 return (InSmm &&
146 mPagingContext.ContextData.X64.PageTableBase != (UINT64)AsmReadCr3());
147 }
148
149 /**
150 Return current paging context.
151
152 @param[in,out] PagingContext The paging context.
153 **/
154 VOID
155 GetCurrentPagingContext (
156 IN OUT PAGE_TABLE_LIB_PAGING_CONTEXT *PagingContext
157 )
158 {
159 UINT32 RegEax;
160 CPUID_EXTENDED_CPU_SIG_EDX RegEdx;
161 MSR_IA32_EFER_REGISTER MsrEfer;
162
163 //
164 // Don't retrieve current paging context from processor if in SMM mode.
165 //
166 if (!IsInSmm ()) {
167 ZeroMem (&mPagingContext, sizeof(mPagingContext));
168 if (sizeof(UINTN) == sizeof(UINT64)) {
169 mPagingContext.MachineType = IMAGE_FILE_MACHINE_X64;
170 } else {
171 mPagingContext.MachineType = IMAGE_FILE_MACHINE_I386;
172 }
173 if ((AsmReadCr0 () & CR0_PG) != 0) {
174 mPagingContext.ContextData.X64.PageTableBase = (AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
175 } else {
176 mPagingContext.ContextData.X64.PageTableBase = 0;
177 }
178
179 if ((AsmReadCr4 () & CR4_PSE) != 0) {
180 mPagingContext.ContextData.Ia32.Attributes |= PAGE_TABLE_LIB_PAGING_CONTEXT_IA32_X64_ATTRIBUTES_PSE;
181 }
182 if ((AsmReadCr4 () & CR4_PAE) != 0) {
183 mPagingContext.ContextData.Ia32.Attributes |= PAGE_TABLE_LIB_PAGING_CONTEXT_IA32_X64_ATTRIBUTES_PAE;
184 }
185 if ((AsmReadCr0 () & CR0_WP) != 0) {
186 mPagingContext.ContextData.Ia32.Attributes |= PAGE_TABLE_LIB_PAGING_CONTEXT_IA32_X64_ATTRIBUTES_WP_ENABLE;
187 }
188
189 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
190 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
191 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx.Uint32);
192
193 if (RegEdx.Bits.NX != 0) {
194 // XD supported
195 MsrEfer.Uint64 = AsmReadMsr64(MSR_CORE_IA32_EFER);
196 if (MsrEfer.Bits.NXE != 0) {
197 // XD activated
198 mPagingContext.ContextData.Ia32.Attributes |= PAGE_TABLE_LIB_PAGING_CONTEXT_IA32_X64_ATTRIBUTES_XD_ACTIVATED;
199 }
200 }
201
202 if (RegEdx.Bits.Page1GB != 0) {
203 mPagingContext.ContextData.Ia32.Attributes |= PAGE_TABLE_LIB_PAGING_CONTEXT_IA32_X64_ATTRIBUTES_PAGE_1G_SUPPORT;
204 }
205 }
206 }
207
208 //
209 // This can avoid getting SMM paging context if in SMM mode. We cannot assume
210 // SMM mode shares the same paging context as DXE.
211 //
212 CopyMem (PagingContext, &mPagingContext, sizeof (mPagingContext));
213 }
214
215 /**
216 Return length according to page attributes.
217
218 @param[in] PageAttribute The page attribute of the page entry.
219
220 @return The length of page entry.
221 **/
222 UINTN
223 PageAttributeToLength (
224 IN PAGE_ATTRIBUTE PageAttribute
225 )
226 {
227 UINTN Index;
228 for (Index = 0; Index < sizeof(mPageAttributeTable)/sizeof(mPageAttributeTable[0]); Index++) {
229 if (PageAttribute == mPageAttributeTable[Index].Attribute) {
230 return (UINTN)mPageAttributeTable[Index].Length;
231 }
232 }
233 return 0;
234 }
235
236 /**
237 Return address mask according to page attributes.
238
239 @param[in] PageAttribute The page attribute of the page entry.
240
241 @return The address mask of page entry.
242 **/
243 UINTN
244 PageAttributeToMask (
245 IN PAGE_ATTRIBUTE PageAttribute
246 )
247 {
248 UINTN Index;
249 for (Index = 0; Index < sizeof(mPageAttributeTable)/sizeof(mPageAttributeTable[0]); Index++) {
250 if (PageAttribute == mPageAttributeTable[Index].Attribute) {
251 return (UINTN)mPageAttributeTable[Index].AddressMask;
252 }
253 }
254 return 0;
255 }
256
257 /**
258 Return page table entry to match the address.
259
260 @param[in] PagingContext The paging context.
261 @param[in] Address The address to be checked.
262 @param[out] PageAttribute The page attribute of the page entry.
263
264 @return The page entry.
265 **/
266 VOID *
267 GetPageTableEntry (
268 IN PAGE_TABLE_LIB_PAGING_CONTEXT *PagingContext,
269 IN PHYSICAL_ADDRESS Address,
270 OUT PAGE_ATTRIBUTE *PageAttribute
271 )
272 {
273 UINTN Index1;
274 UINTN Index2;
275 UINTN Index3;
276 UINTN Index4;
277 UINT64 *L1PageTable;
278 UINT64 *L2PageTable;
279 UINT64 *L3PageTable;
280 UINT64 *L4PageTable;
281 UINT64 AddressEncMask;
282
283 ASSERT (PagingContext != NULL);
284
285 Index4 = ((UINTN)RShiftU64 (Address, 39)) & PAGING_PAE_INDEX_MASK;
286 Index3 = ((UINTN)Address >> 30) & PAGING_PAE_INDEX_MASK;
287 Index2 = ((UINTN)Address >> 21) & PAGING_PAE_INDEX_MASK;
288 Index1 = ((UINTN)Address >> 12) & PAGING_PAE_INDEX_MASK;
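//
// Each paging level is indexed by 9 bits of the linear address: bits 47:39
// (PML4), 38:30 (PDPT), 29:21 (PD) and 20:12 (PT). In the IA32 PAE case only
// the low 2 bits of Index3 are meaningful and Index4 is not used.
//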
289
290 // Make sure AddressEncMask is confined to the smallest supported address field.
291 //
292 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
293
294 if (PagingContext->MachineType == IMAGE_FILE_MACHINE_X64) {
295 L4PageTable = (UINT64 *)(UINTN)PagingContext->ContextData.X64.PageTableBase;
296 if (L4PageTable[Index4] == 0) {
297 *PageAttribute = PageNone;
298 return NULL;
299 }
300
301 L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~AddressEncMask & PAGING_4K_ADDRESS_MASK_64);
302 } else {
303 ASSERT((PagingContext->ContextData.Ia32.Attributes & PAGE_TABLE_LIB_PAGING_CONTEXT_IA32_X64_ATTRIBUTES_PAE) != 0);
304 L3PageTable = (UINT64 *)(UINTN)PagingContext->ContextData.Ia32.PageTableBase;
305 }
306 if (L3PageTable[Index3] == 0) {
307 *PageAttribute = PageNone;
308 return NULL;
309 }
310 if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
311 // 1G
312 *PageAttribute = Page1G;
313 return &L3PageTable[Index3];
314 }
315
316 L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~AddressEncMask & PAGING_4K_ADDRESS_MASK_64);
317 if (L2PageTable[Index2] == 0) {
318 *PageAttribute = PageNone;
319 return NULL;
320 }
321 if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
322 // 2M
323 *PageAttribute = Page2M;
324 return &L2PageTable[Index2];
325 }
326
327 // 4k
328 L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~AddressEncMask & PAGING_4K_ADDRESS_MASK_64);
329 if ((L1PageTable[Index1] == 0) && (Address != 0)) {
330 *PageAttribute = PageNone;
331 return NULL;
332 }
333 *PageAttribute = Page4K;
334 return &L1PageTable[Index1];
335 }
336
337 /**
338 Return memory attributes of page entry.
339
340 @param[in] PageEntry The page entry.
341
342 @return Memory attributes of page entry.
343 **/
344 UINT64
345 GetAttributesFromPageEntry (
346 IN UINT64 *PageEntry
347 )
348 {
349 UINT64 Attributes;
350 Attributes = 0;
351 if ((*PageEntry & IA32_PG_P) == 0) {
352 Attributes |= EFI_MEMORY_RP;
353 }
354 if ((*PageEntry & IA32_PG_RW) == 0) {
355 Attributes |= EFI_MEMORY_RO;
356 }
357 if ((*PageEntry & IA32_PG_NX) != 0) {
358 Attributes |= EFI_MEMORY_XP;
359 }
360 return Attributes;
361 }
362
363 /**
364 Modify memory attributes of page entry.
365
366 @param[in] PagingContext The paging context.
367 @param[in] PageEntry The page entry.
368 @param[in] Attributes The bit mask of attributes to modify for the memory region.
369 @param[in] PageAction The page action.
370 @param[out] IsModified TRUE means page table modified. FALSE means page table not modified.
371 **/
372 VOID
373 ConvertPageEntryAttribute (
374 IN PAGE_TABLE_LIB_PAGING_CONTEXT *PagingContext,
375 IN UINT64 *PageEntry,
376 IN UINT64 Attributes,
377 IN PAGE_ACTION PageAction,
378 OUT BOOLEAN *IsModified
379 )
380 {
381 UINT64 CurrentPageEntry;
382 UINT64 NewPageEntry;
383
384 CurrentPageEntry = *PageEntry;
385 NewPageEntry = CurrentPageEntry;
386 if ((Attributes & EFI_MEMORY_RP) != 0) {
387 switch (PageAction) {
388 case PageActionAssign:
389 case PageActionSet:
390 NewPageEntry &= ~(UINT64)IA32_PG_P;
391 break;
392 case PageActionClear:
393 NewPageEntry |= IA32_PG_P;
394 break;
395 }
396 } else {
397 switch (PageAction) {
398 case PageActionAssign:
399 NewPageEntry |= IA32_PG_P;
400 break;
401 case PageActionSet:
402 case PageActionClear:
403 break;
404 }
405 }
406 if ((Attributes & EFI_MEMORY_RO) != 0) {
407 switch (PageAction) {
408 case PageActionAssign:
409 case PageActionSet:
410 NewPageEntry &= ~(UINT64)IA32_PG_RW;
411 break;
412 case PageActionClear:
413 NewPageEntry |= IA32_PG_RW;
414 break;
415 }
416 } else {
417 switch (PageAction) {
418 case PageActionAssign:
419 NewPageEntry |= IA32_PG_RW;
420 break;
421 case PageActionSet:
422 case PageActionClear:
423 break;
424 }
425 }
426 if ((PagingContext->ContextData.Ia32.Attributes & PAGE_TABLE_LIB_PAGING_CONTEXT_IA32_X64_ATTRIBUTES_XD_ACTIVATED) != 0) {
427 if ((Attributes & EFI_MEMORY_XP) != 0) {
428 switch (PageAction) {
429 case PageActionAssign:
430 case PageActionSet:
431 NewPageEntry |= IA32_PG_NX;
432 break;
433 case PageActionClear:
434 NewPageEntry &= ~IA32_PG_NX;
435 break;
436 }
437 } else {
438 switch (PageAction) {
439 case PageActionAssign:
440 NewPageEntry &= ~IA32_PG_NX;
441 break;
442 case PageActionSet:
443 case PageActionClear:
444 break;
445 }
446 }
447 }
448 *PageEntry = NewPageEntry;
449 if (CurrentPageEntry != NewPageEntry) {
450 *IsModified = TRUE;
451 DEBUG ((DEBUG_VERBOSE, "ConvertPageEntryAttribute 0x%lx", CurrentPageEntry));
452 DEBUG ((DEBUG_VERBOSE, "->0x%lx\n", NewPageEntry));
453 } else {
454 *IsModified = FALSE;
455 }
456 }
457
458 /**
459 This function returns whether the page entry needs to be split.
460
461 @param[in] BaseAddress The base address to be checked.
462 @param[in] Length The length to be checked.
463 @param[in] PageEntry The page entry to be checked.
464 @param[in] PageAttribute The page attribute of the page entry.
465
466 @return The page attribute the entry should be split to, or PageNone if no split is needed.
467 **/
468 PAGE_ATTRIBUTE
469 NeedSplitPage (
470 IN PHYSICAL_ADDRESS BaseAddress,
471 IN UINT64 Length,
472 IN UINT64 *PageEntry,
473 IN PAGE_ATTRIBUTE PageAttribute
474 )
475 {
476 UINT64 PageEntryLength;
477
478 PageEntryLength = PageAttributeToLength (PageAttribute);
479
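//
// No split is needed if the region covers the whole large page. Otherwise,
// split down to 4K when the region is not 2M-aligned or shorter than 2M;
// in the remaining (1G) case, split down to 2M.
//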
480 if (((BaseAddress & (PageEntryLength - 1)) == 0) && (Length >= PageEntryLength)) {
481 return PageNone;
482 }
483
484 if (((BaseAddress & PAGING_2M_MASK) != 0) || (Length < SIZE_2MB)) {
485 return Page4K;
486 }
487
488 return Page2M;
489 }
490
491 /**
492 This function splits one page entry into smaller page entries.
493
494 @param[in] PageEntry The page entry to be split.
495 @param[in] PageAttribute The page attribute of the page entry.
496 @param[in] SplitAttribute How to split the page entry.
497 @param[in] AllocatePagesFunc If page split is needed, this function is used to allocate more pages.
498
499 @retval RETURN_SUCCESS The page entry was split.
500 @retval RETURN_UNSUPPORTED The page entry does not support being split.
501 @retval RETURN_OUT_OF_RESOURCES No resource to split the page entry.
502 **/
503 RETURN_STATUS
504 SplitPage (
505 IN UINT64 *PageEntry,
506 IN PAGE_ATTRIBUTE PageAttribute,
507 IN PAGE_ATTRIBUTE SplitAttribute,
508 IN PAGE_TABLE_LIB_ALLOCATE_PAGES AllocatePagesFunc
509 )
510 {
511 UINT64 BaseAddress;
512 UINT64 *NewPageEntry;
513 UINTN Index;
514 UINT64 AddressEncMask;
515
516 ASSERT (PageAttribute == Page2M || PageAttribute == Page1G);
517
518 ASSERT (AllocatePagesFunc != NULL);
519
520 // Make sure AddressEncMask is confined to the smallest supported address field.
521 //
522 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
523
524 if (PageAttribute == Page2M) {
525 //
526 // Split 2M to 4K
527 //
528 ASSERT (SplitAttribute == Page4K);
529 if (SplitAttribute == Page4K) {
530 NewPageEntry = AllocatePagesFunc (1);
531 DEBUG ((DEBUG_INFO, "Split - 0x%x\n", NewPageEntry));
532 if (NewPageEntry == NULL) {
533 return RETURN_OUT_OF_RESOURCES;
534 }
535 BaseAddress = *PageEntry & ~AddressEncMask & PAGING_2M_ADDRESS_MASK_64;
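//
// Fill the 512 new 4K entries: each maps a consecutive 4KB of the former 2M
// range and inherits the attribute bits (PAGE_PROGATE_BITS) and the
// encryption mask of the original entry.
//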
536 for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
537 NewPageEntry[Index] = (BaseAddress + SIZE_4KB * Index) | AddressEncMask | ((*PageEntry) & PAGE_PROGATE_BITS);
538 }
539 (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | ((*PageEntry) & PAGE_ATTRIBUTE_BITS);
540 return RETURN_SUCCESS;
541 } else {
542 return RETURN_UNSUPPORTED;
543 }
544 } else if (PageAttribute == Page1G) {
545 //
546 // Split 1G to 2M
547 // There is no need to support 1G->4K directly; use 1G->2M, then 2M->4K, to get a more compact page table.
548 //
549 ASSERT (SplitAttribute == Page2M || SplitAttribute == Page4K);
550 if ((SplitAttribute == Page2M || SplitAttribute == Page4K)) {
551 NewPageEntry = AllocatePagesFunc (1);
552 DEBUG ((DEBUG_INFO, "Split - 0x%x\n", NewPageEntry));
553 if (NewPageEntry == NULL) {
554 return RETURN_OUT_OF_RESOURCES;
555 }
556 BaseAddress = *PageEntry & ~AddressEncMask & PAGING_1G_ADDRESS_MASK_64;
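//
// Fill the 512 new 2M entries; IA32_PG_PS is set in each so they remain
// large-page (2M) mappings rather than pointers to page tables.
//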
557 for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
558 NewPageEntry[Index] = (BaseAddress + SIZE_2MB * Index) | AddressEncMask | IA32_PG_PS | ((*PageEntry) & PAGE_PROGATE_BITS);
559 }
560 (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | ((*PageEntry) & PAGE_ATTRIBUTE_BITS);
561 return RETURN_SUCCESS;
562 } else {
563 return RETURN_UNSUPPORTED;
564 }
565 } else {
566 return RETURN_UNSUPPORTED;
567 }
568 }
569
570 /**
571 Check the WP status in CR0 register. This bit is used to lock or unlock write
572 access to pages marked as read-only.
573
574 @retval TRUE Write protection is enabled.
575 @retval FALSE Write protection is disabled.
576 **/
577 BOOLEAN
578 IsReadOnlyPageWriteProtected (
579 VOID
580 )
581 {
582 //
583 // To avoid unforeseen consequences, don't touch paging settings in SMM mode
584 // in this driver.
585 //
586 if (!IsInSmm ()) {
587 return ((AsmReadCr0 () & CR0_WP) != 0);
588 }
589 return FALSE;
590 }
591
592 /**
593 Disable Write Protect on pages marked as read-only.
594 **/
595 VOID
596 DisableReadOnlyPageWriteProtect (
597 VOID
598 )
599 {
600 //
601 // To avoid unforeseen consequences, don't touch paging settings in SMM mode
602 // in this driver.
603 //
604 if (!IsInSmm ()) {
605 AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);
606 }
607 }
608
609 /**
610 Enable Write Protect on pages marked as read-only.
611 **/
612 VOID
613 EnableReadOnlyPageWriteProtect (
614 VOID
615 )
616 {
617 //
618 // To avoid unforeseen consequences, don't touch paging settings in SMM mode
619 // in this driver.
620 //
621 if (!IsInSmm ()) {
622 AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
623 }
624 }
625
626 /**
627 This function modifies the page attributes for the memory region specified by BaseAddress and
628 Length from their current attributes to the attributes specified by Attributes.
629
630 Caller should make sure BaseAddress and Length are at a page boundary.
631
632 @param[in] PagingContext The paging context. NULL means get page table from current CPU context.
633 @param[in] BaseAddress The physical address that is the start address of a memory region.
634 @param[in] Length The size in bytes of the memory region.
635 @param[in] Attributes The bit mask of attributes to modify for the memory region.
636 @param[in] PageAction The page action.
637 @param[in] AllocatePagesFunc If page split is needed, this function is used to allocate more pages.
638 NULL means page split is unsupported.
639 @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
640 @param[out] IsModified TRUE means page table modified. FALSE means page table not modified.
641
642 @retval RETURN_SUCCESS The attributes were modified for the memory region.
643 @retval RETURN_ACCESS_DENIED The attributes for the memory resource range specified by
644 BaseAddress and Length cannot be modified.
645 @retval RETURN_INVALID_PARAMETER Length is zero.
646 Attributes specified an illegal combination of attributes that
647 cannot be set together.
648 @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
649 the memory resource range.
650 @retval RETURN_UNSUPPORTED The processor does not support one or more bytes of the memory
651 resource range specified by BaseAddress and Length.
652 The bit mask of attributes is not supported for the memory resource
653 range specified by BaseAddress and Length.
654 **/
655 RETURN_STATUS
656 ConvertMemoryPageAttributes (
657 IN PAGE_TABLE_LIB_PAGING_CONTEXT *PagingContext OPTIONAL,
658 IN PHYSICAL_ADDRESS BaseAddress,
659 IN UINT64 Length,
660 IN UINT64 Attributes,
661 IN PAGE_ACTION PageAction,
662 IN PAGE_TABLE_LIB_ALLOCATE_PAGES AllocatePagesFunc OPTIONAL,
663 OUT BOOLEAN *IsSplitted, OPTIONAL
664 OUT BOOLEAN *IsModified OPTIONAL
665 )
666 {
667 PAGE_TABLE_LIB_PAGING_CONTEXT CurrentPagingContext;
668 UINT64 *PageEntry;
669 PAGE_ATTRIBUTE PageAttribute;
670 UINTN PageEntryLength;
671 PAGE_ATTRIBUTE SplitAttribute;
672 RETURN_STATUS Status;
673 BOOLEAN IsEntryModified;
674 BOOLEAN IsWpEnabled;
675
676 if ((BaseAddress & (SIZE_4KB - 1)) != 0) {
677 DEBUG ((DEBUG_ERROR, "BaseAddress(0x%lx) is not aligned!\n", BaseAddress));
678 return EFI_UNSUPPORTED;
679 }
680 if ((Length & (SIZE_4KB - 1)) != 0) {
681 DEBUG ((DEBUG_ERROR, "Length(0x%lx) is not aligned!\n", Length));
682 return EFI_UNSUPPORTED;
683 }
684 if (Length == 0) {
685 DEBUG ((DEBUG_ERROR, "Length is 0!\n"));
686 return RETURN_INVALID_PARAMETER;
687 }
688
689 if ((Attributes & ~(EFI_MEMORY_RP | EFI_MEMORY_RO | EFI_MEMORY_XP)) != 0) {
690 DEBUG ((DEBUG_ERROR, "Attributes(0x%lx) has unsupported bit\n", Attributes));
691 return EFI_UNSUPPORTED;
692 }
693
694 if (PagingContext == NULL) {
695 GetCurrentPagingContext (&CurrentPagingContext);
696 } else {
697 CopyMem (&CurrentPagingContext, PagingContext, sizeof(CurrentPagingContext));
698 }
699 switch(CurrentPagingContext.MachineType) {
700 case IMAGE_FILE_MACHINE_I386:
701 if (CurrentPagingContext.ContextData.Ia32.PageTableBase == 0) {
702 if (Attributes == 0) {
703 return EFI_SUCCESS;
704 } else {
705 DEBUG ((DEBUG_ERROR, "PageTable is 0!\n"));
706 return EFI_UNSUPPORTED;
707 }
708 }
709 if ((CurrentPagingContext.ContextData.Ia32.Attributes & PAGE_TABLE_LIB_PAGING_CONTEXT_IA32_X64_ATTRIBUTES_PAE) == 0) {
710 DEBUG ((DEBUG_ERROR, "Non-PAE Paging!\n"));
711 return EFI_UNSUPPORTED;
712 }
713 if ((BaseAddress + Length) > BASE_4GB) {
714 DEBUG ((DEBUG_ERROR, "Beyond 4GB memory in 32-bit mode!\n"));
715 return EFI_UNSUPPORTED;
716 }
717 break;
718 case IMAGE_FILE_MACHINE_X64:
719 ASSERT (CurrentPagingContext.ContextData.X64.PageTableBase != 0);
720 break;
721 default:
722 ASSERT(FALSE);
723 return EFI_UNSUPPORTED;
724 break;
725 }
726
727 // DEBUG ((DEBUG_ERROR, "ConvertMemoryPageAttributes(%x) - %016lx, %016lx, %02lx\n", IsSet, BaseAddress, Length, Attributes));
728
729 if (IsSplitted != NULL) {
730 *IsSplitted = FALSE;
731 }
732 if (IsModified != NULL) {
733 *IsModified = FALSE;
734 }
735 if (AllocatePagesFunc == NULL) {
736 AllocatePagesFunc = AllocatePageTableMemory;
737 }
738
739 //
740 // Make sure that the page table is changeable.
741 //
742 IsWpEnabled = IsReadOnlyPageWriteProtected ();
743 if (IsWpEnabled) {
744 DisableReadOnlyPageWriteProtect ();
745 }
746
747 //
748 // The logic below checks 2M/4K pages to make sure we do not waste memory.
749 //
750 Status = EFI_SUCCESS;
751 while (Length != 0) {
752 PageEntry = GetPageTableEntry (&CurrentPagingContext, BaseAddress, &PageAttribute);
753 if (PageEntry == NULL) {
754 Status = RETURN_UNSUPPORTED;
755 goto Done;
756 }
757 PageEntryLength = PageAttributeToLength (PageAttribute);
758 SplitAttribute = NeedSplitPage (BaseAddress, Length, PageEntry, PageAttribute);
759 if (SplitAttribute == PageNone) {
760 ConvertPageEntryAttribute (&CurrentPagingContext, PageEntry, Attributes, PageAction, &IsEntryModified);
761 if (IsEntryModified) {
762 if (IsModified != NULL) {
763 *IsModified = TRUE;
764 }
765 }
766 //
767 // Convert success, move to next
768 //
769 BaseAddress += PageEntryLength;
770 Length -= PageEntryLength;
771 } else {
772 if (AllocatePagesFunc == NULL) {
773 Status = RETURN_UNSUPPORTED;
774 goto Done;
775 }
776 Status = SplitPage (PageEntry, PageAttribute, SplitAttribute, AllocatePagesFunc);
777 if (RETURN_ERROR (Status)) {
778 Status = RETURN_UNSUPPORTED;
779 goto Done;
780 }
781 if (IsSplitted != NULL) {
782 *IsSplitted = TRUE;
783 }
784 if (IsModified != NULL) {
785 *IsModified = TRUE;
786 }
787 //
788 // Just split the current page here;
789 // the conversion will succeed in the next round of the loop.
790 //
791 }
792 }
793
794 Done:
795 //
796 // Restore page table write protection, if any.
797 //
798 if (IsWpEnabled) {
799 EnableReadOnlyPageWriteProtect ();
800 }
801 return Status;
802 }
803
804 /**
805 This function assigns the page attributes for the memory region specified by BaseAddress and
806 Length from their current attributes to the attributes specified by Attributes.
807
808 Caller should make sure BaseAddress and Length are at a page boundary.
809
810 Caller needs to guarantee that TPL <= TPL_NOTIFY if there is a page split request.
811
812 @param[in] PagingContext The paging context. NULL means get page table from current CPU context.
813 @param[in] BaseAddress The physical address that is the start address of a memory region.
814 @param[in] Length The size in bytes of the memory region.
815 @param[in] Attributes The bit mask of attributes to set for the memory region.
816 @param[in] AllocatePagesFunc If page split is needed, this function is used to allocate more pages.
817 NULL means page split is unsupported.
818
819 @retval RETURN_SUCCESS The attributes were assigned for the memory region.
820 @retval RETURN_ACCESS_DENIED The attributes for the memory resource range specified by
821 BaseAddress and Length cannot be modified.
822 @retval RETURN_INVALID_PARAMETER Length is zero.
823 Attributes specified an illegal combination of attributes that
824 cannot be set together.
825 @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
826 the memory resource range.
827 @retval RETURN_UNSUPPORTED The processor does not support one or more bytes of the memory
828 resource range specified by BaseAddress and Length.
829 The bit mask of attributes is not supported for the memory resource
830 range specified by BaseAddress and Length.
831 **/
832 RETURN_STATUS
833 EFIAPI
834 AssignMemoryPageAttributes (
835 IN PAGE_TABLE_LIB_PAGING_CONTEXT *PagingContext OPTIONAL,
836 IN PHYSICAL_ADDRESS BaseAddress,
837 IN UINT64 Length,
838 IN UINT64 Attributes,
839 IN PAGE_TABLE_LIB_ALLOCATE_PAGES AllocatePagesFunc OPTIONAL
840 )
841 {
842 RETURN_STATUS Status;
843 BOOLEAN IsModified;
844 BOOLEAN IsSplitted;
845
846 // DEBUG((DEBUG_INFO, "AssignMemoryPageAttributes: 0x%lx - 0x%lx (0x%lx)\n", BaseAddress, Length, Attributes));
847 Status = ConvertMemoryPageAttributes (PagingContext, BaseAddress, Length, Attributes, PageActionAssign, AllocatePagesFunc, &IsSplitted, &IsModified);
848 if (!EFI_ERROR(Status)) {
849 if ((PagingContext == NULL) && IsModified) {
850 //
851 // Flush TLB as last step.
852 //
853 // Note: Since APs will always init CR3 register in HLT loop mode or do
854 // TLB flush in MWAIT loop mode, there's no need to flush TLB for them
855 // here.
856 //
857 CpuFlushTlb();
858 }
859 }
860
861 return Status;
862 }
863
864 /**
865 Check if Execute Disable feature is enabled or not.
866 **/
867 BOOLEAN
868 IsExecuteDisableEnabled (
869 VOID
870 )
871 {
872 MSR_CORE_IA32_EFER_REGISTER MsrEfer;
873
874 MsrEfer.Uint64 = AsmReadMsr64 (MSR_IA32_EFER);
875 return (MsrEfer.Bits.NXE == 1);
876 }
877
878 /**
879 Update GCD memory space attributes according to current page table setup.
880 **/
881 VOID
882 RefreshGcdMemoryAttributesFromPaging (
883 VOID
884 )
885 {
886 EFI_STATUS Status;
887 UINTN NumberOfDescriptors;
888 EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
889 PAGE_TABLE_LIB_PAGING_CONTEXT PagingContext;
890 PAGE_ATTRIBUTE PageAttribute;
891 UINT64 *PageEntry;
892 UINT64 PageLength;
893 UINT64 MemorySpaceLength;
894 UINT64 Length;
895 UINT64 BaseAddress;
896 UINT64 PageStartAddress;
897 UINT64 Attributes;
898 UINT64 Capabilities;
899 UINT64 NewAttributes;
900 UINTN Index;
901
902 //
903 // Assuming that memory space map returned is sorted already; otherwise sort
904 // them in the order of lowest address to highest address.
905 //
906 Status = gDS->GetMemorySpaceMap (&NumberOfDescriptors, &MemorySpaceMap);
907 ASSERT_EFI_ERROR (Status);
908
909 GetCurrentPagingContext (&PagingContext);
910
911 Attributes = 0;
912 NewAttributes = 0;
913 BaseAddress = 0;
914 PageLength = 0;
915
916 if (IsExecuteDisableEnabled ()) {
917 Capabilities = EFI_MEMORY_RO | EFI_MEMORY_RP | EFI_MEMORY_XP;
918 } else {
919 Capabilities = EFI_MEMORY_RO | EFI_MEMORY_RP;
920 }
921
922 for (Index = 0; Index < NumberOfDescriptors; Index++) {
923 if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeNonExistent) {
924 continue;
925 }
926
927 //
928 // Sync the actual paging related capabilities back to GCD service first.
929 // As a side effect (good one), this can also help to avoid unnecessary
930 // memory map entries due to the different capabilities of the same type
931 // memory, such as multiple RT_CODE and RT_DATA entries in memory map,
932 // which could cause boot failure of some old Linux distro (before v4.3).
933 //
934 Status = gDS->SetMemorySpaceCapabilities (
935 MemorySpaceMap[Index].BaseAddress,
936 MemorySpaceMap[Index].Length,
937 MemorySpaceMap[Index].Capabilities | Capabilities
938 );
939 if (EFI_ERROR (Status)) {
940 //
941 // If we cannot update the capabilities, we cannot update its
942 // attributes either, so just skip the current block of memory.
943 //
944 DEBUG ((
945 DEBUG_WARN,
946 "Failed to update capability: [%lu] %016lx - %016lx (%016lx -> %016lx)\r\n",
947 (UINT64)Index, MemorySpaceMap[Index].BaseAddress,
948 MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length - 1,
949 MemorySpaceMap[Index].Capabilities,
950 MemorySpaceMap[Index].Capabilities | Capabilities
951 ));
952 continue;
953 }
954
955 if (MemorySpaceMap[Index].BaseAddress >= (BaseAddress + PageLength)) {
956 //
957 // Current memory space starts at a new page. Resetting PageLength will
958 // trigger a retrieval of page attributes at new address.
959 //
960 PageLength = 0;
961 } else {
962 //
963 // Current memory space starts within the page examined last time; count only the remainder of that page.
964 //
965 PageLength -= (MemorySpaceMap[Index].BaseAddress - BaseAddress);
966 }
967
968 //
969 // Sync actual page attributes to GCD
970 //
971 BaseAddress = MemorySpaceMap[Index].BaseAddress;
972 MemorySpaceLength = MemorySpaceMap[Index].Length;
973 while (MemorySpaceLength > 0) {
974 if (PageLength == 0) {
975 PageEntry = GetPageTableEntry (&PagingContext, BaseAddress, &PageAttribute);
976 if (PageEntry == NULL) {
977 break;
978 }
979
980 //
981 // Note current memory space might start in the middle of a page
982 //
983 PageStartAddress = (*PageEntry) & (UINT64)PageAttributeToMask(PageAttribute);
984 PageLength = PageAttributeToLength (PageAttribute) - (BaseAddress - PageStartAddress);
985 Attributes = GetAttributesFromPageEntry (PageEntry);
986 }
987
988 Length = MIN (PageLength, MemorySpaceLength);
989 if (Attributes != (MemorySpaceMap[Index].Attributes &
990 EFI_MEMORY_PAGETYPE_MASK)) {
991 NewAttributes = (MemorySpaceMap[Index].Attributes &
992 ~EFI_MEMORY_PAGETYPE_MASK) | Attributes;
993 Status = gDS->SetMemorySpaceAttributes (
994 BaseAddress,
995 Length,
996 NewAttributes
997 );
998 ASSERT_EFI_ERROR (Status);
999 DEBUG ((
1000 DEBUG_VERBOSE,
1001 "Updated memory space attribute: [%lu] %016lx - %016lx (%016lx -> %016lx)\r\n",
1002 (UINT64)Index, BaseAddress, BaseAddress + Length - 1,
1003 MemorySpaceMap[Index].Attributes,
1004 NewAttributes
1005 ));
1006 }
1007
1008 PageLength -= Length;
1009 MemorySpaceLength -= Length;
1010 BaseAddress += Length;
1011 }
1012 }
1013
1014 FreePool (MemorySpaceMap);
1015 }
1016
1017 /**
1018 Initialize a buffer pool for page table use only.
1019
1020 To reduce the potential page table split operations, the pages reserved for
1021 page tables should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
1022 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
1023 initialized with a number of pages greater than or equal to the given PoolPages.
1024
1025 Once the pages in the pool are used up, this method should be called again to
1026 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't happen
1027 often in practice.
1028
1029 @param[in] PoolPages The least page number of the pool to be created.
1030
1031 @retval TRUE The pool is initialized successfully.
1032 @retval FALSE The memory is out of resource.
1033 **/
1034 BOOLEAN
1035 InitializePageTablePool (
1036 IN UINTN PoolPages
1037 )
1038 {
1039 VOID *Buffer;
1040 BOOLEAN IsModified;
1041
1042 //
1043 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
1044 // header.
1045 //
1046 PoolPages += 1; // Add one page for header.
1047 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
1048 PAGE_TABLE_POOL_UNIT_PAGES;
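//
// Example (assuming PAGE_TABLE_POOL_UNIT_PAGES is 512 pages, i.e. 2MB, per its
// definition in CpuPageTable.h): a request for 1 page becomes 2 with the
// header and is then rounded up to 512 pages, allocated at
// PAGE_TABLE_POOL_ALIGNMENT.
//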
1049 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
1050 if (Buffer == NULL) {
1051 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
1052 return FALSE;
1053 }
1054
1055 //
1056 // Link all pools into a list for easier tracking later.
1057 //
1058 if (mPageTablePool == NULL) {
1059 mPageTablePool = Buffer;
1060 mPageTablePool->NextPool = mPageTablePool;
1061 } else {
1062 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
1063 mPageTablePool->NextPool = Buffer;
1064 mPageTablePool = Buffer;
1065 }
1066
1067 //
1068 // Reserve one page for pool header.
1069 //
1070 mPageTablePool->FreePages = PoolPages - 1;
1071 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
1072
1073 //
1074 // Mark the whole pool pages as read-only.
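// This protects the page tables themselves: with the pool marked read-only,
// page table memory can only be modified after CR0.WP is cleared, which this
// library does around its own updates.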
1075 //
1076 ConvertMemoryPageAttributes (
1077 NULL,
1078 (PHYSICAL_ADDRESS)(UINTN)Buffer,
1079 EFI_PAGES_TO_SIZE (PoolPages),
1080 EFI_MEMORY_RO,
1081 PageActionSet,
1082 AllocatePageTableMemory,
1083 NULL,
1084 &IsModified
1085 );
1086 ASSERT (IsModified == TRUE);
1087
1088 return TRUE;
1089 }
1090
1091 /**
1092 This API provides a way to allocate memory for page table.
1093
1094 This API can be called more than once to allocate memory for page tables.
1095
1096 Allocates the number of 4KB pages and returns a pointer to the allocated
1097 buffer. The buffer returned is aligned on a 4KB boundary.
1098
1099 If Pages is 0, then NULL is returned.
1100 If there is not enough memory remaining to satisfy the request, then NULL is
1101 returned.
1102
1103 @param Pages The number of 4 KB pages to allocate.
1104
1105 @return A pointer to the allocated buffer or NULL if allocation fails.
1106
1107 **/
1108 VOID *
1109 EFIAPI
1110 AllocatePageTableMemory (
1111 IN UINTN Pages
1112 )
1113 {
1114 VOID *Buffer;
1115
1116 if (Pages == 0) {
1117 return NULL;
1118 }
1119
1120 //
1121 // Renew the pool if necessary.
1122 //
1123 if (mPageTablePool == NULL ||
1124 Pages > mPageTablePool->FreePages) {
1125 if (!InitializePageTablePool (Pages)) {
1126 return NULL;
1127 }
1128 }
1129
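//
// Carve the requested pages out of the current pool: the first page of each
// pool holds the PAGE_TABLE_POOL header, and Offset/FreePages track the simple
// bump allocation below.
//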
1130 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
1131
1132 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
1133 mPageTablePool->FreePages -= Pages;
1134
1135 return Buffer;
1136 }
1137
1138 /**
1139 Initialize the Page Table lib.
1140 **/
1141 VOID
1142 InitializePageTableLib (
1143 VOID
1144 )
1145 {
1146 PAGE_TABLE_LIB_PAGING_CONTEXT CurrentPagingContext;
1147
1148 GetCurrentPagingContext (&CurrentPagingContext);
1149
1150 //
1151 // Reserve memory for page tables for future use, if paging is enabled.
1152 //
1153 if (CurrentPagingContext.ContextData.X64.PageTableBase != 0 &&
1154 (CurrentPagingContext.ContextData.Ia32.Attributes &
1155 PAGE_TABLE_LIB_PAGING_CONTEXT_IA32_X64_ATTRIBUTES_PAE) != 0) {
1156 DisableReadOnlyPageWriteProtect ();
1157 InitializePageTablePool (1);
1158 EnableReadOnlyPageWriteProtect ();
1159 }
1160
1161 DEBUG ((DEBUG_INFO, "CurrentPagingContext:\n"));
1162 DEBUG ((DEBUG_INFO, " MachineType - 0x%x\n", CurrentPagingContext.MachineType));
1163 DEBUG ((DEBUG_INFO, " PageTableBase - 0x%lx\n", CurrentPagingContext.ContextData.X64.PageTableBase));
1164 DEBUG ((DEBUG_INFO, " Attributes - 0x%x\n", CurrentPagingContext.ContextData.X64.Attributes));
1165
1166 return ;
1167 }
1168