]> git.proxmox.com Git - mirror_edk2.git/blob - OvmfPkg/Library/BaseMemEncryptTdxLib/MemoryEncryption.c
OvmfPkg/BaseMemEncryptTdxLib: Add TDX helper library
[mirror_edk2.git] / OvmfPkg / Library / BaseMemEncryptTdxLib / MemoryEncryption.c
1 /** @file
2
3 Virtual Memory Management Services to set or clear the memory encryption.
4
5 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
6 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7
8 SPDX-License-Identifier: BSD-2-Clause-Patent
9
10 Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
11
12 Note:
13 There a lot of duplicated codes for Page Table operations. These
14 codes should be moved to a common library (PageTablesLib) so that it is
15 more friendly for review and maintain. There is a new feature requirement
16 https://bugzilla.tianocore.org/show_bug.cgi?id=847 which is to implement
17 the library. After the lib is introduced, this file will be refactored.
18
19 **/
20
21 #include <Uefi.h>
22 #include <Uefi/UefiBaseType.h>
23 #include <Library/CpuLib.h>
24 #include <Library/BaseLib.h>
25 #include <Library/DebugLib.h>
26 #include <Library/MemEncryptTdxLib.h>
27 #include "VirtualMemory.h"
28 #include <IndustryStandard/Tdx.h>
29 #include <Library/TdxLib.h>
30 #include <ConfidentialComputingGuestAttr.h>
31
//
// Mode used when walking the page tables: either set the TDX shared bit
// (make the range shared with the VMM) or clear it (make it guest-private).
//
typedef enum {
  SetSharedBit,
  ClearSharedBit
} TDX_PAGETABLE_MODE;

//
// Head of the circular linked list of page-table memory pools. NULL until
// the first pool is created by InitializePageTablePool().
//
STATIC PAGE_TABLE_POOL  *mPageTablePool = NULL;
38
39 /**
40 Returns boolean to indicate whether to indicate which, if any, memory encryption is enabled
41
42 @param[in] Type Bitmask of encryption technologies to check is enabled
43
44 @retval TRUE The encryption type(s) are enabled
45 @retval FALSE The encryption type(s) are not enabled
46 **/
47 BOOLEAN
48 EFIAPI
49 MemEncryptTdxIsEnabled (
50 VOID
51 )
52 {
53 return CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr));
54 }
55
56 /**
57 Get the memory encryption mask
58
59 @param[out] EncryptionMask contains the pte mask.
60
61 **/
62 STATIC
63 UINT64
64 GetMemEncryptionAddressMask (
65 VOID
66 )
67 {
68 return TdSharedPageMask ();
69 }
70
71 /**
72 Initialize a buffer pool for page table use only.
73
74 To reduce the potential split operation on page table, the pages reserved for
75 page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and
76 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
77 initialized with number of pages greater than or equal to the given
78 PoolPages.
79
80 Once the pages in the pool are used up, this method should be called again to
81 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't
82 happen often in practice.
83
84 @param[in] PoolPages The least page number of the pool to be created.
85
86 @retval TRUE The pool is initialized successfully.
87 @retval FALSE The memory is out of resource.
88 **/
89 STATIC
90 BOOLEAN
91 InitializePageTablePool (
92 IN UINTN PoolPages
93 )
94 {
95 VOID *Buffer;
96
97 //
98 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
99 // header.
100 //
101 PoolPages += 1; // Add one page for header.
102 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
103 PAGE_TABLE_POOL_UNIT_PAGES;
104 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
105 if (Buffer == NULL) {
106 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
107 return FALSE;
108 }
109
110 //
111 // Link all pools into a list for easier track later.
112 //
113 if (mPageTablePool == NULL) {
114 mPageTablePool = Buffer;
115 mPageTablePool->NextPool = mPageTablePool;
116 } else {
117 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
118 mPageTablePool->NextPool = Buffer;
119 mPageTablePool = Buffer;
120 }
121
122 //
123 // Reserve one page for pool header.
124 //
125 mPageTablePool->FreePages = PoolPages - 1;
126 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
127
128 return TRUE;
129 }
130
131 /**
132 This API provides a way to allocate memory for page table.
133
134 This API can be called more than once to allocate memory for page tables.
135
136 Allocates the number of 4KB pages and returns a pointer to the allocated
137 buffer. The buffer returned is aligned on a 4KB boundary.
138
139 If Pages is 0, then NULL is returned.
140 If there is not enough memory remaining to satisfy the request, then NULL is
141 returned.
142
143 @param Pages The number of 4 KB pages to allocate.
144
145 @return A pointer to the allocated buffer or NULL if allocation fails.
146
147 **/
148 STATIC
149 VOID *
150 EFIAPI
151 AllocatePageTableMemory (
152 IN UINTN Pages
153 )
154 {
155 VOID *Buffer;
156
157 if (Pages == 0) {
158 return NULL;
159 }
160
161 //
162 // Renew the pool if necessary.
163 //
164 if ((mPageTablePool == NULL) ||
165 (Pages > mPageTablePool->FreePages))
166 {
167 if (!InitializePageTablePool (Pages)) {
168 return NULL;
169 }
170 }
171
172 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
173
174 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
175 mPageTablePool->FreePages -= Pages;
176
177 DEBUG ((
178 DEBUG_VERBOSE,
179 "%a:%a: Buffer=0x%Lx Pages=%ld\n",
180 gEfiCallerBaseName,
181 __FUNCTION__,
182 Buffer,
183 Pages
184 ));
185
186 return Buffer;
187 }
188
189 /**
190 Split 2M page to 4K.
191
192 @param[in] PhysicalAddress Start physical address the 2M page
193 covered.
194 @param[in, out] PageEntry2M Pointer to 2M page entry.
195 @param[in] StackBase Stack base address.
196 @param[in] StackSize Stack size.
197
198 **/
199 STATIC
200 VOID
201 Split2MPageTo4K (
202 IN PHYSICAL_ADDRESS PhysicalAddress,
203 IN OUT UINT64 *PageEntry2M,
204 IN PHYSICAL_ADDRESS StackBase,
205 IN UINTN StackSize,
206 IN UINT64 AddressEncMask
207 )
208 {
209 PHYSICAL_ADDRESS PhysicalAddress4K;
210 UINTN IndexOfPageTableEntries;
211 PAGE_TABLE_4K_ENTRY *PageTableEntry, *PageTableEntry1;
212
213 PageTableEntry = AllocatePageTableMemory (1);
214
215 PageTableEntry1 = PageTableEntry;
216
217 if (PageTableEntry == NULL) {
218 ASSERT (FALSE);
219 return;
220 }
221
222 PhysicalAddress4K = PhysicalAddress;
223 for (IndexOfPageTableEntries = 0;
224 IndexOfPageTableEntries < 512;
225 (IndexOfPageTableEntries++,
226 PageTableEntry++,
227 PhysicalAddress4K += SIZE_4KB))
228 {
229 //
230 // Fill in the Page Table entries
231 //
232 PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K | AddressEncMask;
233 PageTableEntry->Bits.ReadWrite = 1;
234 PageTableEntry->Bits.Present = 1;
235 if ((PhysicalAddress4K >= StackBase) &&
236 (PhysicalAddress4K < StackBase + StackSize))
237 {
238 //
239 // Set Nx bit for stack.
240 //
241 PageTableEntry->Bits.Nx = 1;
242 }
243 }
244
245 //
246 // Fill in 2M page entry.
247 //
248 *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
249 IA32_PG_P | IA32_PG_RW | AddressEncMask);
250 }
251
/**
  Set one page of page table pool memory to be read-only.

  Walks the paging hierarchy down to the entry that maps Address and clears
  the R/W bit; when the mapping granularity is larger than a pool unit, the
  large page is first split in place so only the pool unit loses write access.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  UINT64                ActiveAddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  if (PageTableBase == 0) {
    ASSERT (FALSE);
    return;
  }

  //
  // Since the page table is always from page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  //
  // Per-level lookup tables: address shift, base-address mask, and mapped
  // region size for levels 1 (PTE) through 4 (PML4E). Index 0 is unused.
  //
  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = GetMemEncryptionAddressMask () &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  //
  // Walk from the top level down toward the leaf that maps Address.
  //
  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr             = PageTable[Index];
    // Preserve whichever shared-bit setting this entry currently carries.
    ActiveAddressEncMask = GetMemEncryptionAddressMask () & PageAttr;

    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
          // one page (2MB). Then we don't need to update attributes for pages
          // crossing page directory. ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // The smaller granularity of page must be needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      if (NewPageTable == NULL) {
        ASSERT (FALSE);
        return;
      }

      //
      // Split the large page in place: build a child table that maps the
      // identical range at the next-smaller granularity, preserving the
      // entry's shared-bit state.
      //
      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | ActiveAddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          // Entries below a 1G split are themselves large (2M) pages.
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | ActiveAddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      // Continue the walk inside the newly created child table.
      PageTable = NewPageTable;
    }
  }
}
384
385 /**
386 Prevent the memory pages used for page table from been overwritten.
387
388 @param[in] PageTableBase Base address of page table (CR3).
389 @param[in] Level4Paging Level 4 paging flag.
390
391 **/
392 STATIC
393 VOID
394 EnablePageTableProtection (
395 IN UINTN PageTableBase,
396 IN BOOLEAN Level4Paging
397 )
398 {
399 PAGE_TABLE_POOL *HeadPool;
400 PAGE_TABLE_POOL *Pool;
401 UINT64 PoolSize;
402 EFI_PHYSICAL_ADDRESS Address;
403
404 if (mPageTablePool == NULL) {
405 return;
406 }
407
408 //
409 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
410 // remember original one in advance.
411 //
412 HeadPool = mPageTablePool;
413 Pool = HeadPool;
414 do {
415 Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
416 PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
417
418 //
419 // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE,
420 // which is one of page size of the processor (2MB by default). Let's apply
421 // the protection to them one by one.
422 //
423 while (PoolSize > 0) {
424 SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
425 Address += PAGE_TABLE_POOL_UNIT_SIZE;
426 PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
427 }
428
429 Pool = Pool->NextPool;
430 } while (Pool != HeadPool);
431 }
432
433 /**
434 Split 1G page to 2M.
435
436 @param[in] PhysicalAddress Start physical address the 1G page
437 covered.
438 @param[in, out] PageEntry1G Pointer to 1G page entry.
439 @param[in] StackBase Stack base address.
440 @param[in] StackSize Stack size.
441
442 **/
443 STATIC
444 VOID
445 Split1GPageTo2M (
446 IN PHYSICAL_ADDRESS PhysicalAddress,
447 IN OUT UINT64 *PageEntry1G,
448 IN PHYSICAL_ADDRESS StackBase,
449 IN UINTN StackSize
450 )
451 {
452 PHYSICAL_ADDRESS PhysicalAddress2M;
453 UINTN IndexOfPageDirectoryEntries;
454 PAGE_TABLE_ENTRY *PageDirectoryEntry;
455 UINT64 AddressEncMask;
456 UINT64 ActiveAddressEncMask;
457
458 PageDirectoryEntry = AllocatePageTableMemory (1);
459 if (PageDirectoryEntry == NULL) {
460 return;
461 }
462
463 AddressEncMask = GetMemEncryptionAddressMask ();
464 ASSERT (PageDirectoryEntry != NULL);
465
466 ActiveAddressEncMask = *PageEntry1G & AddressEncMask;
467 //
468 // Fill in 1G page entry.
469 //
470 *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
471 IA32_PG_P | IA32_PG_RW | ActiveAddressEncMask);
472
473 PhysicalAddress2M = PhysicalAddress;
474 for (IndexOfPageDirectoryEntries = 0;
475 IndexOfPageDirectoryEntries < 512;
476 (IndexOfPageDirectoryEntries++,
477 PageDirectoryEntry++,
478 PhysicalAddress2M += SIZE_2MB))
479 {
480 if ((PhysicalAddress2M < StackBase + StackSize) &&
481 ((PhysicalAddress2M + SIZE_2MB) > StackBase))
482 {
483 //
484 // Need to split this 2M page that covers stack range.
485 //
486 Split2MPageTo4K (
487 PhysicalAddress2M,
488 (UINT64 *)PageDirectoryEntry,
489 StackBase,
490 StackSize,
491 ActiveAddressEncMask
492 );
493 } else {
494 //
495 // Fill in the Page Directory entries
496 //
497 PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | ActiveAddressEncMask;
498 PageDirectoryEntry->Bits.ReadWrite = 1;
499 PageDirectoryEntry->Bits.Present = 1;
500 PageDirectoryEntry->Bits.MustBe1 = 1;
501 }
502 }
503 }
504
505 /**
506 Set or Clear the memory shared bit
507
508 @param[in] PagetablePoint Page table entry pointer (PTE).
509 @param[in] Mode Set or Clear shared bit
510
511 **/
512 STATIC VOID
513 SetOrClearSharedBit (
514 IN OUT UINT64 *PageTablePointer,
515 IN TDX_PAGETABLE_MODE Mode,
516 IN PHYSICAL_ADDRESS PhysicalAddress,
517 IN UINT64 Length
518 )
519 {
520 UINT64 AddressEncMask;
521 UINT64 Status;
522
523 AddressEncMask = GetMemEncryptionAddressMask ();
524
525 //
526 // Set or clear page table entry. Also, set shared bit in physical address, before calling MapGPA
527 //
528 if (Mode == SetSharedBit) {
529 *PageTablePointer |= AddressEncMask;
530 PhysicalAddress |= AddressEncMask;
531 } else {
532 *PageTablePointer &= ~AddressEncMask;
533 PhysicalAddress &= ~AddressEncMask;
534 }
535
536 Status = TdVmCall (TDVMCALL_MAPGPA, PhysicalAddress, Length, 0, 0, NULL);
537
538 //
539 // If changing shared to private, must accept-page again
540 //
541 if (Mode == ClearSharedBit) {
542 TdAcceptPages (PhysicalAddress, Length / EFI_PAGE_SIZE, EFI_PAGE_SIZE);
543 }
544
545 DEBUG ((
546 DEBUG_VERBOSE,
547 "%a:%a: pte=0x%Lx AddressEncMask=0x%Lx Mode=0x%x MapGPA Status=0x%x\n",
548 gEfiCallerBaseName,
549 __FUNCTION__,
550 *PageTablePointer,
551 AddressEncMask,
552 Mode,
553 Status
554 ));
555 }
556
557 /**
558 Check the WP status in CR0 register. This bit is used to lock or unlock write
559 access to pages marked as read-only.
560
561 @retval TRUE Write protection is enabled.
562 @retval FALSE Write protection is disabled.
563 **/
564 STATIC
565 BOOLEAN
566 IsReadOnlyPageWriteProtected (
567 VOID
568 )
569 {
570 return ((AsmReadCr0 () & BIT16) != 0);
571 }
572
573 /**
574 Disable Write Protect on pages marked as read-only.
575 **/
576 STATIC
577 VOID
578 DisableReadOnlyPageWriteProtect (
579 VOID
580 )
581 {
582 AsmWriteCr0 (AsmReadCr0 () & ~BIT16);
583 }
584
585 /**
586 Enable Write Protect on pages marked as read-only.
587 **/
588 VOID
589 EnableReadOnlyPageWriteProtect (
590 VOID
591 )
592 {
593 AsmWriteCr0 (AsmReadCr0 () | BIT16);
594 }
595
/**
  This function either sets or clears memory encryption for the memory
  region specified by PhysicalAddress and Length from the current page table
  context.

  The function iterates through the PhysicalAddress one page at a time, and set
  or clears the memory encryption in the page table. If it encounters
  that a given physical address range is part of large page then it attempts to
  change the attribute at one go (based on size), otherwise it splits the
  large pages into smaller (e.g 2M page into 4K pages) and then try to set or
  clear the shared bit on the smallest page size.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region
  @param[in]  Mode                    Set or Clear mode

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encyrption attribute
                                      is not supported
**/
STATIC
RETURN_STATUS
EFIAPI
SetMemorySharedOrPrivate (
  IN    PHYSICAL_ADDRESS    Cr3BaseAddress,
  IN    PHYSICAL_ADDRESS    PhysicalAddress,
  IN    UINTN               Length,
  IN    TDX_PAGETABLE_MODE  Mode
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageUpperDirectoryPointerEntry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  PAGE_TABLE_ENTRY                *PageDirectory2MEntry;
  PAGE_TABLE_4K_ENTRY             *PageTableEntry;
  UINT64                          PgTableMask;
  UINT64                          AddressEncMask;
  UINT64                          ActiveEncMask;
  BOOLEAN                         IsWpEnabled;
  RETURN_STATUS                   Status;
  IA32_CR4                        Cr4;
  BOOLEAN                         Page5LevelSupport;

  //
  // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel4Entry = NULL;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length,
    (Mode == SetSharedBit) ? "Shared" : "Private"
    ));

  //
  // Check if we have a valid memory encryption mask
  //
  AddressEncMask = GetMemEncryptionAddressMask ();

  // Mask used to strip both the shared bit and the low flag bits when
  // extracting a next-level table pointer from an entry.
  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // Make sure that the page table is changeable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  //
  // If Cr3BaseAddress is not specified then read the current CR3
  //
  if (Cr3BaseAddress == 0) {
    Cr3BaseAddress = AsmReadCr3 ();
  }

  //
  // CPU will already have LA57 enabled so just check CR4
  //
  Cr4.UintN = AsmReadCr4 ();

  Page5LevelSupport = (Cr4.Bits.LA57 ? TRUE : FALSE);
  //
  // If 5-level pages, adjust Cr3BaseAddress to point to first 4-level page directory,
  // we will only have 1
  //
  if (Page5LevelSupport) {
    // Dereference the single PML5 entry to get the PML4 table base.
    Cr3BaseAddress = *(UINT64 *)Cr3BaseAddress & ~PgTableMask;
  }

  Status = EFI_SUCCESS;

  //
  // Walk the range; each iteration either converts a 1G/2M/4K chunk and
  // advances, or splits a large page and retries the same address via
  // "continue".
  //
  while (Length) {
    PageMapLevel4Entry  = (VOID *)(Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID *)(
                                    (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
                                     12) & ~PgTableMask
                                    );
    PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PDPE for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    //
    // If the MustBe1 bit is not 1, it's not actually a 1GB entry
    //
    if (PageDirectory1GEntry->Bits.MustBe1) {
      //
      // Valid 1GB page
      // If we have at least 1GB to go, we can just update this entry
      //
      if (!(PhysicalAddress & (BIT30 - 1)) && (Length >= BIT30)) {
        SetOrClearSharedBit (&PageDirectory1GEntry->Uint64, Mode, PhysicalAddress, BIT30);
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        PhysicalAddress += BIT30;
        Length          -= BIT30;
      } else {
        //
        // We must split the page
        //
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Split1GPageTo2M (
          (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
          (UINT64 *)PageDirectory1GEntry,
          0,
          0
          );
        // Re-walk the same address; it now maps through 2M entries.
        continue;
      }
    } else {
      //
      // Actually a PDP
      //
      PageUpperDirectoryPointerEntry =
        (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
      PageDirectory2MEntry =
        (VOID *)(
                 (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                  12) & ~PgTableMask
                 );
      PageDirectory2MEntry += PDE_OFFSET (PhysicalAddress);
      if (!PageDirectory2MEntry->Bits.Present) {
        DEBUG ((
          DEBUG_ERROR,
          "%a:%a: bad PDE for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Status = RETURN_NO_MAPPING;
        goto Done;
      }

      //
      // If the MustBe1 bit is not a 1, it's not a 2MB entry
      //
      if (PageDirectory2MEntry->Bits.MustBe1) {
        //
        // Valid 2MB page
        // If we have at least 2MB left to go, we can just update this entry
        //
        if (!(PhysicalAddress & (BIT21-1)) && (Length >= BIT21)) {
          SetOrClearSharedBit (&PageDirectory2MEntry->Uint64, Mode, PhysicalAddress, BIT21);
          PhysicalAddress += BIT21;
          Length          -= BIT21;
        } else {
          //
          // We must split up this page into 4K pages
          //
          DEBUG ((
            DEBUG_VERBOSE,
            "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));

          // Carry the entry's current shared-bit state into the new 4K table.
          ActiveEncMask = PageDirectory2MEntry->Uint64 & AddressEncMask;

          Split2MPageTo4K (
            (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
            (UINT64 *)PageDirectory2MEntry,
            0,
            0,
            ActiveEncMask
            );
          // Re-walk the same address; it now maps through 4K entries.
          continue;
        }
      } else {
        PageDirectoryPointerEntry =
          (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
        PageTableEntry =
          (VOID *)(
                   (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                    12) & ~PgTableMask
                   );
        PageTableEntry += PTE_OFFSET (PhysicalAddress);
        if (!PageTableEntry->Bits.Present) {
          DEBUG ((
            DEBUG_ERROR,
            "%a:%a: bad PTE for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Status = RETURN_NO_MAPPING;
          goto Done;
        }

        SetOrClearSharedBit (&PageTableEntry->Uint64, Mode, PhysicalAddress, EFI_PAGE_SIZE);
        PhysicalAddress += EFI_PAGE_SIZE;
        Length          -= EFI_PAGE_SIZE;
      }
    }
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  // NOTE(review): this passes the last PML4 *entry* pointer (table base +
  // PML4_OFFSET) rather than Cr3BaseAddress as the page table base — confirm
  // this is intentional.
  //
  if (IsWpEnabled) {
    EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}
883
884 /**
885 This function clears memory shared bit for the memory region specified by
886 BaseAddress and NumPages from the current page table context.
887
888 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
889 current CR3)
890 @param[in] BaseAddress The physical address that is the start
891 address of a memory region.
892 @param[in] NumPages The number of pages from start memory
893 region.
894
895 @retval RETURN_SUCCESS The attributes were cleared for the
896 memory region.
897 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
898 @retval RETURN_UNSUPPORTED Clearing the memory encryption attribute
899 is not supported
900 **/
901 RETURN_STATUS
902 EFIAPI
903 MemEncryptTdxSetPageSharedBit (
904 IN PHYSICAL_ADDRESS Cr3BaseAddress,
905 IN PHYSICAL_ADDRESS BaseAddress,
906 IN UINTN NumPages
907 )
908 {
909 return SetMemorySharedOrPrivate (
910 Cr3BaseAddress,
911 BaseAddress,
912 EFI_PAGES_TO_SIZE (NumPages),
913 SetSharedBit
914 );
915 }
916
917 /**
918 This function sets memory shared bit for the memory region specified by
919 BaseAddress and NumPages from the current page table context.
920
921 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
922 current CR3)
923 @param[in] BaseAddress The physical address that is the start
924 address of a memory region.
925 @param[in] NumPages The number of pages from start memory
926 region.
927
928 @retval RETURN_SUCCESS The attributes were set for the memory
929 region.
930 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
931 @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
932 is not supported
933 **/
934 RETURN_STATUS
935 EFIAPI
936 MemEncryptTdxClearPageSharedBit (
937 IN PHYSICAL_ADDRESS Cr3BaseAddress,
938 IN PHYSICAL_ADDRESS BaseAddress,
939 IN UINTN NumPages
940 )
941 {
942 return SetMemorySharedOrPrivate (
943 Cr3BaseAddress,
944 BaseAddress,
945 EFI_PAGES_TO_SIZE (NumPages),
946 ClearSharedBit
947 );
948 }