3 Virtual Memory Management Services to set or clear the memory encryption.
5 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
6 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
8 SPDX-License-Identifier: BSD-2-Clause-Patent
10 Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
13 There is a lot of duplicated code for Page Table operations. This
14 code should be moved to a common library (PageTablesLib) so that it is
15 easier to review and maintain. There is a new feature requirement
16 https://bugzilla.tianocore.org/show_bug.cgi?id=847 which is to implement
17 the library. After the lib is introduced, this file will be refactored.
22 #include <Uefi/UefiBaseType.h>
23 #include <Library/CpuLib.h>
24 #include <Library/BaseLib.h>
25 #include <Library/DebugLib.h>
26 #include <Library/MemEncryptTdxLib.h>
27 #include "VirtualMemory.h"
28 #include <IndustryStandard/Tdx.h>
29 #include <Library/TdxLib.h>
30 #include <ConfidentialComputingGuestAttr.h>
37 STATIC PAGE_TABLE_POOL
*mPageTablePool
= NULL
;
40 Returns a boolean to indicate whether TDX memory encryption is enabled.
42 @param[in] Type Bitmask of encryption technologies to check is enabled
44 @retval TRUE The encryption type(s) are enabled
45 @retval FALSE The encryption type(s) are not enabled
49 MemEncryptTdxIsEnabled (
53 return CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr
));
57 Get the memory encryption mask
59 @return The pte mask holding the memory encryption (TDX shared) bit.
64 GetMemEncryptionAddressMask (
68 return TdSharedPageMask ();
72 Initialize a buffer pool for page table use only.
74 To reduce the potential split operation on page table, the pages reserved for
75 page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and
76 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
77 initialized with number of pages greater than or equal to the given
80 Once the pages in the pool are used up, this method should be called again to
81 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't
82 happen often in practice.
84 @param[in] PoolPages The least page number of the pool to be created.
86 @retval TRUE The pool is initialized successfully.
87 @retval FALSE The memory is out of resource.
91 InitializePageTablePool (
98 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
101 PoolPages
+= 1; // Add one page for header.
102 PoolPages
= ((PoolPages
- 1) / PAGE_TABLE_POOL_UNIT_PAGES
+ 1) *
103 PAGE_TABLE_POOL_UNIT_PAGES
;
104 Buffer
= AllocateAlignedPages (PoolPages
, PAGE_TABLE_POOL_ALIGNMENT
);
105 if (Buffer
== NULL
) {
106 DEBUG ((DEBUG_ERROR
, "ERROR: Out of aligned pages\r\n"));
111 // Link all pools into a list for easier track later.
113 if (mPageTablePool
== NULL
) {
114 mPageTablePool
= Buffer
;
115 mPageTablePool
->NextPool
= mPageTablePool
;
117 ((PAGE_TABLE_POOL
*)Buffer
)->NextPool
= mPageTablePool
->NextPool
;
118 mPageTablePool
->NextPool
= Buffer
;
119 mPageTablePool
= Buffer
;
123 // Reserve one page for pool header.
125 mPageTablePool
->FreePages
= PoolPages
- 1;
126 mPageTablePool
->Offset
= EFI_PAGES_TO_SIZE (1);
132 This API provides a way to allocate memory for page table.
134 This API can be called more than once to allocate memory for page tables.
136 Allocates the number of 4KB pages and returns a pointer to the allocated
137 buffer. The buffer returned is aligned on a 4KB boundary.
139 If Pages is 0, then NULL is returned.
140 If there is not enough memory remaining to satisfy the request, then NULL is
143 @param Pages The number of 4 KB pages to allocate.
145 @return A pointer to the allocated buffer or NULL if allocation fails.
151 AllocatePageTableMemory (
162 // Renew the pool if necessary.
164 if ((mPageTablePool
== NULL
) ||
165 (Pages
> mPageTablePool
->FreePages
))
167 if (!InitializePageTablePool (Pages
)) {
172 Buffer
= (UINT8
*)mPageTablePool
+ mPageTablePool
->Offset
;
174 mPageTablePool
->Offset
+= EFI_PAGES_TO_SIZE (Pages
);
175 mPageTablePool
->FreePages
-= Pages
;
179 "%a:%a: Buffer=0x%Lx Pages=%ld\n",
192 @param[in] PhysicalAddress Start physical address the 2M page
194 @param[in, out] PageEntry2M Pointer to 2M page entry.
195 @param[in] StackBase Stack base address.
196 @param[in] StackSize Stack size.
202 IN PHYSICAL_ADDRESS PhysicalAddress
,
203 IN OUT UINT64
*PageEntry2M
,
204 IN PHYSICAL_ADDRESS StackBase
,
206 IN UINT64 AddressEncMask
209 PHYSICAL_ADDRESS PhysicalAddress4K
;
210 UINTN IndexOfPageTableEntries
;
211 PAGE_TABLE_4K_ENTRY
*PageTableEntry
, *PageTableEntry1
;
213 PageTableEntry
= AllocatePageTableMemory (1);
215 PageTableEntry1
= PageTableEntry
;
217 if (PageTableEntry
== NULL
) {
222 PhysicalAddress4K
= PhysicalAddress
;
223 for (IndexOfPageTableEntries
= 0;
224 IndexOfPageTableEntries
< 512;
225 (IndexOfPageTableEntries
++,
227 PhysicalAddress4K
+= SIZE_4KB
))
230 // Fill in the Page Table entries
232 PageTableEntry
->Uint64
= (UINT64
)PhysicalAddress4K
| AddressEncMask
;
233 PageTableEntry
->Bits
.ReadWrite
= 1;
234 PageTableEntry
->Bits
.Present
= 1;
235 if ((PhysicalAddress4K
>= StackBase
) &&
236 (PhysicalAddress4K
< StackBase
+ StackSize
))
239 // Set Nx bit for stack.
241 PageTableEntry
->Bits
.Nx
= 1;
246 // Fill in 2M page entry.
248 *PageEntry2M
= ((UINT64
)(UINTN
)PageTableEntry1
|
249 IA32_PG_P
| IA32_PG_RW
| AddressEncMask
);
253 Set one page of page table pool memory to be read-only.
255 @param[in] PageTableBase Base address of page table (CR3).
256 @param[in] Address Start address of a page to be set as read-only.
257 @param[in] Level4Paging Level 4 paging flag.
262 SetPageTablePoolReadOnly (
263 IN UINTN PageTableBase
,
264 IN EFI_PHYSICAL_ADDRESS Address
,
265 IN BOOLEAN Level4Paging
270 UINT64 AddressEncMask
;
271 UINT64 ActiveAddressEncMask
;
272 EFI_PHYSICAL_ADDRESS PhysicalAddress
;
274 UINT64
*NewPageTable
;
282 if (PageTableBase
== 0) {
288 // Since the page table is always from page table pool, which is always
289 // located at the boundary of PcdPageTablePoolAlignment, we just need to
290 // set the whole pool unit to be read-only.
292 Address
= Address
& PAGE_TABLE_POOL_ALIGN_MASK
;
294 LevelShift
[1] = PAGING_L1_ADDRESS_SHIFT
;
295 LevelShift
[2] = PAGING_L2_ADDRESS_SHIFT
;
296 LevelShift
[3] = PAGING_L3_ADDRESS_SHIFT
;
297 LevelShift
[4] = PAGING_L4_ADDRESS_SHIFT
;
299 LevelMask
[1] = PAGING_4K_ADDRESS_MASK_64
;
300 LevelMask
[2] = PAGING_2M_ADDRESS_MASK_64
;
301 LevelMask
[3] = PAGING_1G_ADDRESS_MASK_64
;
302 LevelMask
[4] = PAGING_1G_ADDRESS_MASK_64
;
304 LevelSize
[1] = SIZE_4KB
;
305 LevelSize
[2] = SIZE_2MB
;
306 LevelSize
[3] = SIZE_1GB
;
307 LevelSize
[4] = SIZE_512GB
;
309 AddressEncMask
= GetMemEncryptionAddressMask () &
310 PAGING_1G_ADDRESS_MASK_64
;
311 PageTable
= (UINT64
*)(UINTN
)PageTableBase
;
312 PoolUnitSize
= PAGE_TABLE_POOL_UNIT_SIZE
;
314 for (Level
= (Level4Paging
) ? 4 : 3; Level
> 0; --Level
) {
315 Index
= ((UINTN
)RShiftU64 (Address
, LevelShift
[Level
]));
316 Index
&= PAGING_PAE_INDEX_MASK
;
318 PageAttr
= PageTable
[Index
];
319 ActiveAddressEncMask
= GetMemEncryptionAddressMask () & PageAttr
;
321 if ((PageAttr
& IA32_PG_PS
) == 0) {
323 // Go to next level of table.
325 PageTable
= (UINT64
*)(UINTN
)(PageAttr
& ~AddressEncMask
&
326 PAGING_4K_ADDRESS_MASK_64
);
330 if (PoolUnitSize
>= LevelSize
[Level
]) {
332 // Clear R/W bit if current page granularity is not larger than pool unit
335 if ((PageAttr
& IA32_PG_RW
) != 0) {
336 while (PoolUnitSize
> 0) {
338 // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
339 // one page (2MB). Then we don't need to update attributes for pages
340 // crossing page directory. ASSERT below is for that purpose.
342 ASSERT (Index
< EFI_PAGE_SIZE
/sizeof (UINT64
));
344 PageTable
[Index
] &= ~(UINT64
)IA32_PG_RW
;
345 PoolUnitSize
-= LevelSize
[Level
];
354 // The smaller granularity of page must be needed.
358 NewPageTable
= AllocatePageTableMemory (1);
359 if (NewPageTable
== NULL
) {
364 PhysicalAddress
= PageAttr
& LevelMask
[Level
];
366 EntryIndex
< EFI_PAGE_SIZE
/sizeof (UINT64
);
369 NewPageTable
[EntryIndex
] = PhysicalAddress
| ActiveAddressEncMask
|
370 IA32_PG_P
| IA32_PG_RW
;
372 NewPageTable
[EntryIndex
] |= IA32_PG_PS
;
375 PhysicalAddress
+= LevelSize
[Level
- 1];
378 PageTable
[Index
] = (UINT64
)(UINTN
)NewPageTable
| ActiveAddressEncMask
|
379 IA32_PG_P
| IA32_PG_RW
;
380 PageTable
= NewPageTable
;
386 Prevent the memory pages used for page table from been overwritten.
388 @param[in] PageTableBase Base address of page table (CR3).
389 @param[in] Level4Paging Level 4 paging flag.
394 EnablePageTableProtection (
395 IN UINTN PageTableBase
,
396 IN BOOLEAN Level4Paging
399 PAGE_TABLE_POOL
*HeadPool
;
400 PAGE_TABLE_POOL
*Pool
;
402 EFI_PHYSICAL_ADDRESS Address
;
404 if (mPageTablePool
== NULL
) {
409 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
410 // remember original one in advance.
412 HeadPool
= mPageTablePool
;
415 Address
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)Pool
;
416 PoolSize
= Pool
->Offset
+ EFI_PAGES_TO_SIZE (Pool
->FreePages
);
419 // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE,
420 // which is one of page size of the processor (2MB by default). Let's apply
421 // the protection to them one by one.
423 while (PoolSize
> 0) {
424 SetPageTablePoolReadOnly (PageTableBase
, Address
, Level4Paging
);
425 Address
+= PAGE_TABLE_POOL_UNIT_SIZE
;
426 PoolSize
-= PAGE_TABLE_POOL_UNIT_SIZE
;
429 Pool
= Pool
->NextPool
;
430 } while (Pool
!= HeadPool
);
436 @param[in] PhysicalAddress Start physical address the 1G page
438 @param[in, out] PageEntry1G Pointer to 1G page entry.
439 @param[in] StackBase Stack base address.
440 @param[in] StackSize Stack size.
446 IN PHYSICAL_ADDRESS PhysicalAddress
,
447 IN OUT UINT64
*PageEntry1G
,
448 IN PHYSICAL_ADDRESS StackBase
,
452 PHYSICAL_ADDRESS PhysicalAddress2M
;
453 UINTN IndexOfPageDirectoryEntries
;
454 PAGE_TABLE_ENTRY
*PageDirectoryEntry
;
455 UINT64 AddressEncMask
;
456 UINT64 ActiveAddressEncMask
;
458 PageDirectoryEntry
= AllocatePageTableMemory (1);
459 if (PageDirectoryEntry
== NULL
) {
463 AddressEncMask
= GetMemEncryptionAddressMask ();
464 ASSERT (PageDirectoryEntry
!= NULL
);
466 ActiveAddressEncMask
= *PageEntry1G
& AddressEncMask
;
468 // Fill in 1G page entry.
470 *PageEntry1G
= ((UINT64
)(UINTN
)PageDirectoryEntry
|
471 IA32_PG_P
| IA32_PG_RW
| ActiveAddressEncMask
);
473 PhysicalAddress2M
= PhysicalAddress
;
474 for (IndexOfPageDirectoryEntries
= 0;
475 IndexOfPageDirectoryEntries
< 512;
476 (IndexOfPageDirectoryEntries
++,
477 PageDirectoryEntry
++,
478 PhysicalAddress2M
+= SIZE_2MB
))
480 if ((PhysicalAddress2M
< StackBase
+ StackSize
) &&
481 ((PhysicalAddress2M
+ SIZE_2MB
) > StackBase
))
484 // Need to split this 2M page that covers stack range.
488 (UINT64
*)PageDirectoryEntry
,
495 // Fill in the Page Directory entries
497 PageDirectoryEntry
->Uint64
= (UINT64
)PhysicalAddress2M
| ActiveAddressEncMask
;
498 PageDirectoryEntry
->Bits
.ReadWrite
= 1;
499 PageDirectoryEntry
->Bits
.Present
= 1;
500 PageDirectoryEntry
->Bits
.MustBe1
= 1;
506 Set or Clear the memory shared bit
508 @param[in] PageTablePointer Page table entry pointer (PTE).
509 @param[in] Mode Set or Clear shared bit
513 SetOrClearSharedBit (
514 IN OUT UINT64
*PageTablePointer
,
515 IN TDX_PAGETABLE_MODE Mode
,
516 IN PHYSICAL_ADDRESS PhysicalAddress
,
520 UINT64 AddressEncMask
;
523 AddressEncMask
= GetMemEncryptionAddressMask ();
526 // Set or clear page table entry. Also, set shared bit in physical address, before calling MapGPA
528 if (Mode
== SetSharedBit
) {
529 *PageTablePointer
|= AddressEncMask
;
530 PhysicalAddress
|= AddressEncMask
;
532 *PageTablePointer
&= ~AddressEncMask
;
533 PhysicalAddress
&= ~AddressEncMask
;
536 Status
= TdVmCall (TDVMCALL_MAPGPA
, PhysicalAddress
, Length
, 0, 0, NULL
);
539 // If changing shared to private, must accept-page again
541 if (Mode
== ClearSharedBit
) {
542 TdAcceptPages (PhysicalAddress
, Length
/ EFI_PAGE_SIZE
, EFI_PAGE_SIZE
);
547 "%a:%a: pte=0x%Lx AddressEncMask=0x%Lx Mode=0x%x MapGPA Status=0x%x\n",
558 Check the WP status in CR0 register. This bit is used to lock or unlock write
559 access to pages marked as read-only.
561 @retval TRUE Write protection is enabled.
562 @retval FALSE Write protection is disabled.
566 IsReadOnlyPageWriteProtected (
570 return ((AsmReadCr0 () & BIT16
) != 0);
574 Disable Write Protect on pages marked as read-only.
578 DisableReadOnlyPageWriteProtect (
582 AsmWriteCr0 (AsmReadCr0 () & ~BIT16
);
586 Enable Write Protect on pages marked as read-only.
589 EnableReadOnlyPageWriteProtect (
593 AsmWriteCr0 (AsmReadCr0 () | BIT16
);
597 This function either sets or clears memory encryption for the memory
598 region specified by PhysicalAddress and Length from the current page table
601 The function iterates through the PhysicalAddress one page at a time, and set
602 or clears the memory encryption in the page table. If it encounters
603 that a given physical address range is part of large page then it attempts to
604 change the attribute at one go (based on size), otherwise it splits the
605 large pages into smaller (e.g 2M page into 4K pages) and then try to set or
606 clear the shared bit on the smallest page size.
608 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
610 @param[in] PhysicalAddress The physical address that is the start
611 address of a memory region.
612 @param[in] Length The length of memory region
613 @param[in] Mode Set or Clear mode
615 @retval RETURN_SUCCESS The attributes were cleared for the
617 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
618 @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
624 SetMemorySharedOrPrivate (
625 IN PHYSICAL_ADDRESS Cr3BaseAddress
,
626 IN PHYSICAL_ADDRESS PhysicalAddress
,
628 IN TDX_PAGETABLE_MODE Mode
631 PAGE_MAP_AND_DIRECTORY_POINTER
*PageMapLevel4Entry
;
632 PAGE_MAP_AND_DIRECTORY_POINTER
*PageUpperDirectoryPointerEntry
;
633 PAGE_MAP_AND_DIRECTORY_POINTER
*PageDirectoryPointerEntry
;
634 PAGE_TABLE_1G_ENTRY
*PageDirectory1GEntry
;
635 PAGE_TABLE_ENTRY
*PageDirectory2MEntry
;
636 PAGE_TABLE_4K_ENTRY
*PageTableEntry
;
638 UINT64 AddressEncMask
;
639 UINT64 ActiveEncMask
;
641 RETURN_STATUS Status
;
643 BOOLEAN Page5LevelSupport
;
646 // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
648 PageMapLevel4Entry
= NULL
;
652 "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a\n",
658 (Mode
== SetSharedBit
) ? "Shared" : "Private"
662 // Check if we have a valid memory encryption mask
664 AddressEncMask
= GetMemEncryptionAddressMask ();
666 PgTableMask
= AddressEncMask
| EFI_PAGE_MASK
;
669 return RETURN_INVALID_PARAMETER
;
673 // Make sure that the page table is changeable.
675 IsWpEnabled
= IsReadOnlyPageWriteProtected ();
677 DisableReadOnlyPageWriteProtect ();
681 // If Cr3BaseAddress is not specified then read the current CR3
683 if (Cr3BaseAddress
== 0) {
684 Cr3BaseAddress
= AsmReadCr3 ();
688 // CPU will already have LA57 enabled so just check CR4
690 Cr4
.UintN
= AsmReadCr4 ();
692 Page5LevelSupport
= (Cr4
.Bits
.LA57
? TRUE
: FALSE
);
694 // If 5-level pages, adjust Cr3BaseAddress to point to first 4-level page directory,
695 // we will only have 1
697 if (Page5LevelSupport
) {
698 Cr3BaseAddress
= *(UINT64
*)Cr3BaseAddress
& ~PgTableMask
;
701 Status
= EFI_SUCCESS
;
704 PageMapLevel4Entry
= (VOID
*)(Cr3BaseAddress
& ~PgTableMask
);
705 PageMapLevel4Entry
+= PML4_OFFSET (PhysicalAddress
);
706 if (!PageMapLevel4Entry
->Bits
.Present
) {
709 "%a:%a: bad PML4 for Physical=0x%Lx\n",
714 Status
= RETURN_NO_MAPPING
;
718 PageDirectory1GEntry
= (VOID
*)(
719 (PageMapLevel4Entry
->Bits
.PageTableBaseAddress
<<
722 PageDirectory1GEntry
+= PDP_OFFSET (PhysicalAddress
);
723 if (!PageDirectory1GEntry
->Bits
.Present
) {
726 "%a:%a: bad PDPE for Physical=0x%Lx\n",
731 Status
= RETURN_NO_MAPPING
;
736 // If the MustBe1 bit is not 1, it's not actually a 1GB entry
738 if (PageDirectory1GEntry
->Bits
.MustBe1
) {
741 // If we have at least 1GB to go, we can just update this entry
743 if (!(PhysicalAddress
& (BIT30
- 1)) && (Length
>= BIT30
)) {
744 SetOrClearSharedBit (&PageDirectory1GEntry
->Uint64
, Mode
, PhysicalAddress
, BIT30
);
747 "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
752 PhysicalAddress
+= BIT30
;
756 // We must split the page
760 "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
766 (UINT64
)PageDirectory1GEntry
->Bits
.PageTableBaseAddress
<< 30,
767 (UINT64
*)PageDirectory1GEntry
,
777 PageUpperDirectoryPointerEntry
=
778 (PAGE_MAP_AND_DIRECTORY_POINTER
*)PageDirectory1GEntry
;
779 PageDirectory2MEntry
=
781 (PageUpperDirectoryPointerEntry
->Bits
.PageTableBaseAddress
<<
784 PageDirectory2MEntry
+= PDE_OFFSET (PhysicalAddress
);
785 if (!PageDirectory2MEntry
->Bits
.Present
) {
788 "%a:%a: bad PDE for Physical=0x%Lx\n",
793 Status
= RETURN_NO_MAPPING
;
798 // If the MustBe1 bit is not a 1, it's not a 2MB entry
800 if (PageDirectory2MEntry
->Bits
.MustBe1
) {
803 // If we have at least 2MB left to go, we can just update this entry
805 if (!(PhysicalAddress
& (BIT21
-1)) && (Length
>= BIT21
)) {
806 SetOrClearSharedBit (&PageDirectory2MEntry
->Uint64
, Mode
, PhysicalAddress
, BIT21
);
807 PhysicalAddress
+= BIT21
;
811 // We must split up this page into 4K pages
815 "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
821 ActiveEncMask
= PageDirectory2MEntry
->Uint64
& AddressEncMask
;
824 (UINT64
)PageDirectory2MEntry
->Bits
.PageTableBaseAddress
<< 21,
825 (UINT64
*)PageDirectory2MEntry
,
833 PageDirectoryPointerEntry
=
834 (PAGE_MAP_AND_DIRECTORY_POINTER
*)PageDirectory2MEntry
;
837 (PageDirectoryPointerEntry
->Bits
.PageTableBaseAddress
<<
840 PageTableEntry
+= PTE_OFFSET (PhysicalAddress
);
841 if (!PageTableEntry
->Bits
.Present
) {
844 "%a:%a: bad PTE for Physical=0x%Lx\n",
849 Status
= RETURN_NO_MAPPING
;
853 SetOrClearSharedBit (&PageTableEntry
->Uint64
, Mode
, PhysicalAddress
, EFI_PAGE_SIZE
);
854 PhysicalAddress
+= EFI_PAGE_SIZE
;
855 Length
-= EFI_PAGE_SIZE
;
861 // Protect the page table by marking the memory used for page table to be
865 EnablePageTableProtection ((UINTN
)PageMapLevel4Entry
, TRUE
);
875 // Restore page table write protection, if any.
878 EnableReadOnlyPageWriteProtect ();
885 This function sets the memory shared bit for the memory region specified by
886 BaseAddress and NumPages from the current page table context.
888 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
890 @param[in] BaseAddress The physical address that is the start
891 address of a memory region.
892 @param[in] NumPages The number of pages from start memory
895 @retval RETURN_SUCCESS The attributes were cleared for the
897 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
898 @retval RETURN_UNSUPPORTED Clearing the memory encryption attribute
903 MemEncryptTdxSetPageSharedBit (
904 IN PHYSICAL_ADDRESS Cr3BaseAddress
,
905 IN PHYSICAL_ADDRESS BaseAddress
,
909 return SetMemorySharedOrPrivate (
912 EFI_PAGES_TO_SIZE (NumPages
),
918 This function clears the memory shared bit for the memory region specified by
919 BaseAddress and NumPages from the current page table context.
921 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
923 @param[in] BaseAddress The physical address that is the start
924 address of a memory region.
925 @param[in] NumPages The number of pages from start memory
928 @retval RETURN_SUCCESS The attributes were set for the memory
930 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
931 @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
936 MemEncryptTdxClearPageSharedBit (
937 IN PHYSICAL_ADDRESS Cr3BaseAddress
,
938 IN PHYSICAL_ADDRESS BaseAddress
,
942 return SetMemorySharedOrPrivate (
945 EFI_PAGES_TO_SIZE (NumPages
),