3 Virtual Memory Management Services to set or clear the memory encryption.
5 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
6 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
8 SPDX-License-Identifier: BSD-2-Clause-Patent
10 Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
There are a lot of duplicated Page Table operations in this code. They
should be moved to a common library (PageTablesLib) so that the code is
easier to review and maintain. There is a new feature requirement
16 https://bugzilla.tianocore.org/show_bug.cgi?id=847 which is to implement
17 the library. After the lib is introduced, this file will be refactored.
22 #include <Uefi/UefiBaseType.h>
23 #include <Library/CpuLib.h>
24 #include <Library/BaseLib.h>
25 #include <Library/DebugLib.h>
26 #include <Library/MemEncryptTdxLib.h>
27 #include "VirtualMemory.h"
28 #include <IndustryStandard/Tdx.h>
29 #include <Library/TdxLib.h>
30 #include <Library/UefiBootServicesTableLib.h>
31 #include <Protocol/MemoryAccept.h>
32 #include <ConfidentialComputingGuestAttr.h>
39 STATIC PAGE_TABLE_POOL
*mPageTablePool
= NULL
;
42 Returns boolean to indicate whether to indicate which, if any, memory encryption is enabled
44 @param[in] Type Bitmask of encryption technologies to check is enabled
46 @retval TRUE The encryption type(s) are enabled
47 @retval FALSE The encryption type(s) are not enabled
51 MemEncryptTdxIsEnabled (
55 return CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr
));
59 Get the memory encryption mask
61 @param[out] EncryptionMask contains the pte mask.
66 GetMemEncryptionAddressMask (
70 return TdSharedPageMask ();
74 Initialize a buffer pool for page table use only.
76 To reduce the potential split operation on page table, the pages reserved for
77 page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and
78 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
79 initialized with number of pages greater than or equal to the given
82 Once the pages in the pool are used up, this method should be called again to
83 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't
84 happen often in practice.
86 @param[in] PoolPages The least page number of the pool to be created.
88 @retval TRUE The pool is initialized successfully.
89 @retval FALSE The memory is out of resource.
93 InitializePageTablePool (
100 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
103 PoolPages
+= 1; // Add one page for header.
104 PoolPages
= ((PoolPages
- 1) / PAGE_TABLE_POOL_UNIT_PAGES
+ 1) *
105 PAGE_TABLE_POOL_UNIT_PAGES
;
106 Buffer
= AllocateAlignedPages (PoolPages
, PAGE_TABLE_POOL_ALIGNMENT
);
107 if (Buffer
== NULL
) {
108 DEBUG ((DEBUG_ERROR
, "ERROR: Out of aligned pages\r\n"));
113 // Link all pools into a list for easier track later.
115 if (mPageTablePool
== NULL
) {
116 mPageTablePool
= Buffer
;
117 mPageTablePool
->NextPool
= mPageTablePool
;
119 ((PAGE_TABLE_POOL
*)Buffer
)->NextPool
= mPageTablePool
->NextPool
;
120 mPageTablePool
->NextPool
= Buffer
;
121 mPageTablePool
= Buffer
;
125 // Reserve one page for pool header.
127 mPageTablePool
->FreePages
= PoolPages
- 1;
128 mPageTablePool
->Offset
= EFI_PAGES_TO_SIZE (1);
134 This API provides a way to allocate memory for page table.
136 This API can be called more than once to allocate memory for page tables.
138 Allocates the number of 4KB pages and returns a pointer to the allocated
139 buffer. The buffer returned is aligned on a 4KB boundary.
141 If Pages is 0, then NULL is returned.
142 If there is not enough memory remaining to satisfy the request, then NULL is
145 @param Pages The number of 4 KB pages to allocate.
147 @return A pointer to the allocated buffer or NULL if allocation fails.
153 AllocatePageTableMemory (
164 // Renew the pool if necessary.
166 if ((mPageTablePool
== NULL
) ||
167 (Pages
> mPageTablePool
->FreePages
))
169 if (!InitializePageTablePool (Pages
)) {
174 Buffer
= (UINT8
*)mPageTablePool
+ mPageTablePool
->Offset
;
176 mPageTablePool
->Offset
+= EFI_PAGES_TO_SIZE (Pages
);
177 mPageTablePool
->FreePages
-= Pages
;
181 "%a:%a: Buffer=0x%Lx Pages=%ld\n",
194 @param[in] PhysicalAddress Start physical address the 2M page
196 @param[in, out] PageEntry2M Pointer to 2M page entry.
197 @param[in] StackBase Stack base address.
198 @param[in] StackSize Stack size.
204 IN PHYSICAL_ADDRESS PhysicalAddress
,
205 IN OUT UINT64
*PageEntry2M
,
206 IN PHYSICAL_ADDRESS StackBase
,
208 IN UINT64 AddressEncMask
211 PHYSICAL_ADDRESS PhysicalAddress4K
;
212 UINTN IndexOfPageTableEntries
;
213 PAGE_TABLE_4K_ENTRY
*PageTableEntry
, *PageTableEntry1
;
215 PageTableEntry
= AllocatePageTableMemory (1);
217 PageTableEntry1
= PageTableEntry
;
219 if (PageTableEntry
== NULL
) {
224 PhysicalAddress4K
= PhysicalAddress
;
225 for (IndexOfPageTableEntries
= 0;
226 IndexOfPageTableEntries
< 512;
227 (IndexOfPageTableEntries
++,
229 PhysicalAddress4K
+= SIZE_4KB
))
232 // Fill in the Page Table entries
234 PageTableEntry
->Uint64
= (UINT64
)PhysicalAddress4K
| AddressEncMask
;
235 PageTableEntry
->Bits
.ReadWrite
= 1;
236 PageTableEntry
->Bits
.Present
= 1;
237 if ((PhysicalAddress4K
>= StackBase
) &&
238 (PhysicalAddress4K
< StackBase
+ StackSize
))
241 // Set Nx bit for stack.
243 PageTableEntry
->Bits
.Nx
= 1;
248 // Fill in 2M page entry.
250 *PageEntry2M
= ((UINT64
)(UINTN
)PageTableEntry1
|
251 IA32_PG_P
| IA32_PG_RW
| AddressEncMask
);
255 Set one page of page table pool memory to be read-only.
257 @param[in] PageTableBase Base address of page table (CR3).
258 @param[in] Address Start address of a page to be set as read-only.
259 @param[in] Level4Paging Level 4 paging flag.
264 SetPageTablePoolReadOnly (
265 IN UINTN PageTableBase
,
266 IN EFI_PHYSICAL_ADDRESS Address
,
267 IN BOOLEAN Level4Paging
272 UINT64 AddressEncMask
;
273 UINT64 ActiveAddressEncMask
;
274 EFI_PHYSICAL_ADDRESS PhysicalAddress
;
276 UINT64
*NewPageTable
;
284 if (PageTableBase
== 0) {
290 // Since the page table is always from page table pool, which is always
291 // located at the boundary of PcdPageTablePoolAlignment, we just need to
292 // set the whole pool unit to be read-only.
294 Address
= Address
& PAGE_TABLE_POOL_ALIGN_MASK
;
296 LevelShift
[1] = PAGING_L1_ADDRESS_SHIFT
;
297 LevelShift
[2] = PAGING_L2_ADDRESS_SHIFT
;
298 LevelShift
[3] = PAGING_L3_ADDRESS_SHIFT
;
299 LevelShift
[4] = PAGING_L4_ADDRESS_SHIFT
;
301 LevelMask
[1] = PAGING_4K_ADDRESS_MASK_64
;
302 LevelMask
[2] = PAGING_2M_ADDRESS_MASK_64
;
303 LevelMask
[3] = PAGING_1G_ADDRESS_MASK_64
;
304 LevelMask
[4] = PAGING_1G_ADDRESS_MASK_64
;
306 LevelSize
[1] = SIZE_4KB
;
307 LevelSize
[2] = SIZE_2MB
;
308 LevelSize
[3] = SIZE_1GB
;
309 LevelSize
[4] = SIZE_512GB
;
311 AddressEncMask
= GetMemEncryptionAddressMask () &
312 PAGING_1G_ADDRESS_MASK_64
;
313 PageTable
= (UINT64
*)(UINTN
)PageTableBase
;
314 PoolUnitSize
= PAGE_TABLE_POOL_UNIT_SIZE
;
316 for (Level
= (Level4Paging
) ? 4 : 3; Level
> 0; --Level
) {
317 Index
= ((UINTN
)RShiftU64 (Address
, LevelShift
[Level
]));
318 Index
&= PAGING_PAE_INDEX_MASK
;
320 PageAttr
= PageTable
[Index
];
321 ActiveAddressEncMask
= GetMemEncryptionAddressMask () & PageAttr
;
323 if ((PageAttr
& IA32_PG_PS
) == 0) {
325 // Go to next level of table.
327 PageTable
= (UINT64
*)(UINTN
)(PageAttr
& ~AddressEncMask
&
328 PAGING_4K_ADDRESS_MASK_64
);
332 if (PoolUnitSize
>= LevelSize
[Level
]) {
334 // Clear R/W bit if current page granularity is not larger than pool unit
337 if ((PageAttr
& IA32_PG_RW
) != 0) {
338 while (PoolUnitSize
> 0) {
340 // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
341 // one page (2MB). Then we don't need to update attributes for pages
342 // crossing page directory. ASSERT below is for that purpose.
344 ASSERT (Index
< EFI_PAGE_SIZE
/sizeof (UINT64
));
346 PageTable
[Index
] &= ~(UINT64
)IA32_PG_RW
;
347 PoolUnitSize
-= LevelSize
[Level
];
356 // The smaller granularity of page must be needed.
360 NewPageTable
= AllocatePageTableMemory (1);
361 if (NewPageTable
== NULL
) {
366 PhysicalAddress
= PageAttr
& LevelMask
[Level
];
368 EntryIndex
< EFI_PAGE_SIZE
/sizeof (UINT64
);
371 NewPageTable
[EntryIndex
] = PhysicalAddress
| ActiveAddressEncMask
|
372 IA32_PG_P
| IA32_PG_RW
;
374 NewPageTable
[EntryIndex
] |= IA32_PG_PS
;
377 PhysicalAddress
+= LevelSize
[Level
- 1];
380 PageTable
[Index
] = (UINT64
)(UINTN
)NewPageTable
| ActiveAddressEncMask
|
381 IA32_PG_P
| IA32_PG_RW
;
382 PageTable
= NewPageTable
;
388 Prevent the memory pages used for page table from been overwritten.
390 @param[in] PageTableBase Base address of page table (CR3).
391 @param[in] Level4Paging Level 4 paging flag.
396 EnablePageTableProtection (
397 IN UINTN PageTableBase
,
398 IN BOOLEAN Level4Paging
401 PAGE_TABLE_POOL
*HeadPool
;
402 PAGE_TABLE_POOL
*Pool
;
404 EFI_PHYSICAL_ADDRESS Address
;
406 if (mPageTablePool
== NULL
) {
411 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
412 // remember original one in advance.
414 HeadPool
= mPageTablePool
;
417 Address
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)Pool
;
418 PoolSize
= Pool
->Offset
+ EFI_PAGES_TO_SIZE (Pool
->FreePages
);
421 // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE,
422 // which is one of page size of the processor (2MB by default). Let's apply
423 // the protection to them one by one.
425 while (PoolSize
> 0) {
426 SetPageTablePoolReadOnly (PageTableBase
, Address
, Level4Paging
);
427 Address
+= PAGE_TABLE_POOL_UNIT_SIZE
;
428 PoolSize
-= PAGE_TABLE_POOL_UNIT_SIZE
;
431 Pool
= Pool
->NextPool
;
432 } while (Pool
!= HeadPool
);
438 @param[in] PhysicalAddress Start physical address the 1G page
440 @param[in, out] PageEntry1G Pointer to 1G page entry.
441 @param[in] StackBase Stack base address.
442 @param[in] StackSize Stack size.
448 IN PHYSICAL_ADDRESS PhysicalAddress
,
449 IN OUT UINT64
*PageEntry1G
,
450 IN PHYSICAL_ADDRESS StackBase
,
454 PHYSICAL_ADDRESS PhysicalAddress2M
;
455 UINTN IndexOfPageDirectoryEntries
;
456 PAGE_TABLE_ENTRY
*PageDirectoryEntry
;
457 UINT64 AddressEncMask
;
458 UINT64 ActiveAddressEncMask
;
460 PageDirectoryEntry
= AllocatePageTableMemory (1);
461 if (PageDirectoryEntry
== NULL
) {
465 AddressEncMask
= GetMemEncryptionAddressMask ();
466 ASSERT (PageDirectoryEntry
!= NULL
);
468 ActiveAddressEncMask
= *PageEntry1G
& AddressEncMask
;
470 // Fill in 1G page entry.
472 *PageEntry1G
= ((UINT64
)(UINTN
)PageDirectoryEntry
|
473 IA32_PG_P
| IA32_PG_RW
| ActiveAddressEncMask
);
475 PhysicalAddress2M
= PhysicalAddress
;
476 for (IndexOfPageDirectoryEntries
= 0;
477 IndexOfPageDirectoryEntries
< 512;
478 (IndexOfPageDirectoryEntries
++,
479 PageDirectoryEntry
++,
480 PhysicalAddress2M
+= SIZE_2MB
))
482 if ((PhysicalAddress2M
< StackBase
+ StackSize
) &&
483 ((PhysicalAddress2M
+ SIZE_2MB
) > StackBase
))
486 // Need to split this 2M page that covers stack range.
490 (UINT64
*)PageDirectoryEntry
,
497 // Fill in the Page Directory entries
499 PageDirectoryEntry
->Uint64
= (UINT64
)PhysicalAddress2M
| ActiveAddressEncMask
;
500 PageDirectoryEntry
->Bits
.ReadWrite
= 1;
501 PageDirectoryEntry
->Bits
.Present
= 1;
502 PageDirectoryEntry
->Bits
.MustBe1
= 1;
508 Set or Clear the memory shared bit
510 @param[in] PagetablePoint Page table entry pointer (PTE).
511 @param[in] Mode Set or Clear shared bit
515 SetOrClearSharedBit (
516 IN OUT UINT64
*PageTablePointer
,
517 IN TDX_PAGETABLE_MODE Mode
,
518 IN PHYSICAL_ADDRESS PhysicalAddress
,
522 UINT64 AddressEncMask
;
524 EDKII_MEMORY_ACCEPT_PROTOCOL
*MemoryAcceptProtocol
;
526 AddressEncMask
= GetMemEncryptionAddressMask ();
529 // Set or clear page table entry. Also, set shared bit in physical address, before calling MapGPA
531 if (Mode
== SetSharedBit
) {
532 *PageTablePointer
|= AddressEncMask
;
533 PhysicalAddress
|= AddressEncMask
;
535 *PageTablePointer
&= ~AddressEncMask
;
536 PhysicalAddress
&= ~AddressEncMask
;
539 Status
= TdVmCall (TDVMCALL_MAPGPA
, PhysicalAddress
, Length
, 0, 0, NULL
);
542 // If changing shared to private, must accept-page again
544 if (Mode
== ClearSharedBit
) {
545 Status
= gBS
->LocateProtocol (&gEdkiiMemoryAcceptProtocolGuid
, NULL
, (VOID
**)&MemoryAcceptProtocol
);
546 ASSERT (!EFI_ERROR (Status
));
547 Status
= MemoryAcceptProtocol
->AcceptMemory (MemoryAcceptProtocol
, PhysicalAddress
, Length
);
548 ASSERT (!EFI_ERROR (Status
));
553 "%a:%a: pte=0x%Lx AddressEncMask=0x%Lx Mode=0x%x MapGPA Status=0x%x\n",
564 Check the WP status in CR0 register. This bit is used to lock or unlock write
565 access to pages marked as read-only.
567 @retval TRUE Write protection is enabled.
568 @retval FALSE Write protection is disabled.
572 IsReadOnlyPageWriteProtected (
576 return ((AsmReadCr0 () & BIT16
) != 0);
580 Disable Write Protect on pages marked as read-only.
584 DisableReadOnlyPageWriteProtect (
588 AsmWriteCr0 (AsmReadCr0 () & ~BIT16
);
592 Enable Write Protect on pages marked as read-only.
595 EnableReadOnlyPageWriteProtect (
599 AsmWriteCr0 (AsmReadCr0 () | BIT16
);
603 This function either sets or clears memory encryption for the memory
604 region specified by PhysicalAddress and Length from the current page table
607 The function iterates through the PhysicalAddress one page at a time, and set
608 or clears the memory encryption in the page table. If it encounters
609 that a given physical address range is part of large page then it attempts to
610 change the attribute at one go (based on size), otherwise it splits the
611 large pages into smaller (e.g 2M page into 4K pages) and then try to set or
612 clear the shared bit on the smallest page size.
614 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
616 @param[in] PhysicalAddress The physical address that is the start
617 address of a memory region.
618 @param[in] Length The length of memory region
619 @param[in] Mode Set or Clear mode
621 @retval RETURN_SUCCESS The attributes were cleared for the
623 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
624 @retval RETURN_UNSUPPORTED Setting the memory encyrption attribute
630 SetMemorySharedOrPrivate (
631 IN PHYSICAL_ADDRESS Cr3BaseAddress
,
632 IN PHYSICAL_ADDRESS PhysicalAddress
,
634 IN TDX_PAGETABLE_MODE Mode
637 PAGE_MAP_AND_DIRECTORY_POINTER
*PageMapLevel4Entry
;
638 PAGE_MAP_AND_DIRECTORY_POINTER
*PageUpperDirectoryPointerEntry
;
639 PAGE_MAP_AND_DIRECTORY_POINTER
*PageDirectoryPointerEntry
;
640 PAGE_TABLE_1G_ENTRY
*PageDirectory1GEntry
;
641 PAGE_TABLE_ENTRY
*PageDirectory2MEntry
;
642 PAGE_TABLE_4K_ENTRY
*PageTableEntry
;
644 UINT64 AddressEncMask
;
645 UINT64 ActiveEncMask
;
647 RETURN_STATUS Status
;
649 BOOLEAN Page5LevelSupport
;
652 // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
654 PageMapLevel4Entry
= NULL
;
658 "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a\n",
664 (Mode
== SetSharedBit
) ? "Shared" : "Private"
668 // Check if we have a valid memory encryption mask
670 AddressEncMask
= GetMemEncryptionAddressMask ();
672 PgTableMask
= AddressEncMask
| EFI_PAGE_MASK
;
675 return RETURN_INVALID_PARAMETER
;
679 // Make sure that the page table is changeable.
681 IsWpEnabled
= IsReadOnlyPageWriteProtected ();
683 DisableReadOnlyPageWriteProtect ();
687 // If Cr3BaseAddress is not specified then read the current CR3
689 if (Cr3BaseAddress
== 0) {
690 Cr3BaseAddress
= AsmReadCr3 ();
694 // CPU will already have LA57 enabled so just check CR4
696 Cr4
.UintN
= AsmReadCr4 ();
698 Page5LevelSupport
= (Cr4
.Bits
.LA57
? TRUE
: FALSE
);
700 // If 5-level pages, adjust Cr3BaseAddress to point to first 4-level page directory,
701 // we will only have 1
703 if (Page5LevelSupport
) {
704 Cr3BaseAddress
= *(UINT64
*)Cr3BaseAddress
& ~PgTableMask
;
707 Status
= EFI_SUCCESS
;
710 PageMapLevel4Entry
= (VOID
*)(Cr3BaseAddress
& ~PgTableMask
);
711 PageMapLevel4Entry
+= PML4_OFFSET (PhysicalAddress
);
712 if (!PageMapLevel4Entry
->Bits
.Present
) {
715 "%a:%a: bad PML4 for Physical=0x%Lx\n",
720 Status
= RETURN_NO_MAPPING
;
724 PageDirectory1GEntry
= (VOID
*)(
725 (PageMapLevel4Entry
->Bits
.PageTableBaseAddress
<<
728 PageDirectory1GEntry
+= PDP_OFFSET (PhysicalAddress
);
729 if (!PageDirectory1GEntry
->Bits
.Present
) {
732 "%a:%a: bad PDPE for Physical=0x%Lx\n",
737 Status
= RETURN_NO_MAPPING
;
742 // If the MustBe1 bit is not 1, it's not actually a 1GB entry
744 if (PageDirectory1GEntry
->Bits
.MustBe1
) {
747 // If we have at least 1GB to go, we can just update this entry
749 if (!(PhysicalAddress
& (BIT30
- 1)) && (Length
>= BIT30
)) {
750 SetOrClearSharedBit (&PageDirectory1GEntry
->Uint64
, Mode
, PhysicalAddress
, BIT30
);
753 "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
758 PhysicalAddress
+= BIT30
;
762 // We must split the page
766 "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
772 (UINT64
)PageDirectory1GEntry
->Bits
.PageTableBaseAddress
<< 30,
773 (UINT64
*)PageDirectory1GEntry
,
783 PageUpperDirectoryPointerEntry
=
784 (PAGE_MAP_AND_DIRECTORY_POINTER
*)PageDirectory1GEntry
;
785 PageDirectory2MEntry
=
787 (PageUpperDirectoryPointerEntry
->Bits
.PageTableBaseAddress
<<
790 PageDirectory2MEntry
+= PDE_OFFSET (PhysicalAddress
);
791 if (!PageDirectory2MEntry
->Bits
.Present
) {
794 "%a:%a: bad PDE for Physical=0x%Lx\n",
799 Status
= RETURN_NO_MAPPING
;
804 // If the MustBe1 bit is not a 1, it's not a 2MB entry
806 if (PageDirectory2MEntry
->Bits
.MustBe1
) {
809 // If we have at least 2MB left to go, we can just update this entry
811 if (!(PhysicalAddress
& (BIT21
-1)) && (Length
>= BIT21
)) {
812 SetOrClearSharedBit (&PageDirectory2MEntry
->Uint64
, Mode
, PhysicalAddress
, BIT21
);
813 PhysicalAddress
+= BIT21
;
817 // We must split up this page into 4K pages
821 "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
827 ActiveEncMask
= PageDirectory2MEntry
->Uint64
& AddressEncMask
;
830 (UINT64
)PageDirectory2MEntry
->Bits
.PageTableBaseAddress
<< 21,
831 (UINT64
*)PageDirectory2MEntry
,
839 PageDirectoryPointerEntry
=
840 (PAGE_MAP_AND_DIRECTORY_POINTER
*)PageDirectory2MEntry
;
843 (PageDirectoryPointerEntry
->Bits
.PageTableBaseAddress
<<
846 PageTableEntry
+= PTE_OFFSET (PhysicalAddress
);
847 if (!PageTableEntry
->Bits
.Present
) {
850 "%a:%a: bad PTE for Physical=0x%Lx\n",
855 Status
= RETURN_NO_MAPPING
;
859 SetOrClearSharedBit (&PageTableEntry
->Uint64
, Mode
, PhysicalAddress
, EFI_PAGE_SIZE
);
860 PhysicalAddress
+= EFI_PAGE_SIZE
;
861 Length
-= EFI_PAGE_SIZE
;
867 // Protect the page table by marking the memory used for page table to be
871 EnablePageTableProtection ((UINTN
)PageMapLevel4Entry
, TRUE
);
881 // Restore page table write protection, if any.
884 EnableReadOnlyPageWriteProtect ();
891 This function clears memory shared bit for the memory region specified by
892 BaseAddress and NumPages from the current page table context.
894 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
896 @param[in] BaseAddress The physical address that is the start
897 address of a memory region.
898 @param[in] NumPages The number of pages from start memory
901 @retval RETURN_SUCCESS The attributes were cleared for the
903 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
904 @retval RETURN_UNSUPPORTED Clearing the memory encryption attribute
909 MemEncryptTdxSetPageSharedBit (
910 IN PHYSICAL_ADDRESS Cr3BaseAddress
,
911 IN PHYSICAL_ADDRESS BaseAddress
,
915 return SetMemorySharedOrPrivate (
918 EFI_PAGES_TO_SIZE (NumPages
),
924 This function sets memory shared bit for the memory region specified by
925 BaseAddress and NumPages from the current page table context.
927 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
929 @param[in] BaseAddress The physical address that is the start
930 address of a memory region.
931 @param[in] NumPages The number of pages from start memory
934 @retval RETURN_SUCCESS The attributes were set for the memory
936 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
937 @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
942 MemEncryptTdxClearPageSharedBit (
943 IN PHYSICAL_ADDRESS Cr3BaseAddress
,
944 IN PHYSICAL_ADDRESS BaseAddress
,
948 return SetMemorySharedOrPrivate (
951 EFI_PAGES_TO_SIZE (NumPages
),