/*
 * Extraction artifact removed: gitweb header for
 * OvmfPkg/Library/BaseMemEncryptTdxLib/MemoryEncryption.c
 * (mirror_edk2, commit 503f626d75c64275e0bdfb2f21572ca25cdb03da)
 */
1 /** @file
2
3 Virtual Memory Management Services to set or clear the memory encryption.
4
5 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
6 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7
8 SPDX-License-Identifier: BSD-2-Clause-Patent
9
10 Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
11
12 Note:
13 There a lot of duplicated codes for Page Table operations. These
14 codes should be moved to a common library (PageTablesLib) so that it is
15 more friendly for review and maintain. There is a new feature requirement
16 https://bugzilla.tianocore.org/show_bug.cgi?id=847 which is to implement
17 the library. After the lib is introduced, this file will be refactored.
18
19 **/
20
21 #include <Uefi.h>
22 #include <Uefi/UefiBaseType.h>
23 #include <Library/CpuLib.h>
24 #include <Library/BaseLib.h>
25 #include <Library/DebugLib.h>
26 #include <Library/MemEncryptTdxLib.h>
27 #include "VirtualMemory.h"
28 #include <IndustryStandard/Tdx.h>
29 #include <Library/TdxLib.h>
30 #include <Library/UefiBootServicesTableLib.h>
31 #include <Protocol/MemoryAccept.h>
32 #include <ConfidentialComputingGuestAttr.h>
33
//
// Selects the direction of a shared-bit update performed by
// SetOrClearSharedBit(): set the TDX shared bit (private -> shared) or
// clear it (shared -> private).
//
typedef enum {
  SetSharedBit,
  ClearSharedBit
} TDX_PAGETABLE_MODE;

//
// Current head of the circular list of page-table memory pools; NULL until
// InitializePageTablePool() allocates the first pool.
//
STATIC PAGE_TABLE_POOL *mPageTablePool = NULL;
40
41 /**
42 Returns boolean to indicate whether to indicate which, if any, memory encryption is enabled
43
44 @param[in] Type Bitmask of encryption technologies to check is enabled
45
46 @retval TRUE The encryption type(s) are enabled
47 @retval FALSE The encryption type(s) are not enabled
48 **/
49 BOOLEAN
50 EFIAPI
51 MemEncryptTdxIsEnabled (
52 VOID
53 )
54 {
55 return CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr));
56 }
57
58 /**
59 Get the memory encryption mask
60
61 @param[out] EncryptionMask contains the pte mask.
62
63 **/
64 STATIC
65 UINT64
66 GetMemEncryptionAddressMask (
67 VOID
68 )
69 {
70 return TdSharedPageMask ();
71 }
72
73 /**
74 Initialize a buffer pool for page table use only.
75
76 To reduce the potential split operation on page table, the pages reserved for
77 page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and
78 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
79 initialized with number of pages greater than or equal to the given
80 PoolPages.
81
82 Once the pages in the pool are used up, this method should be called again to
83 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't
84 happen often in practice.
85
86 @param[in] PoolPages The least page number of the pool to be created.
87
88 @retval TRUE The pool is initialized successfully.
89 @retval FALSE The memory is out of resource.
90 **/
91 STATIC
92 BOOLEAN
93 InitializePageTablePool (
94 IN UINTN PoolPages
95 )
96 {
97 VOID *Buffer;
98
99 //
100 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
101 // header.
102 //
103 PoolPages += 1; // Add one page for header.
104 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
105 PAGE_TABLE_POOL_UNIT_PAGES;
106 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
107 if (Buffer == NULL) {
108 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
109 return FALSE;
110 }
111
112 //
113 // Link all pools into a list for easier track later.
114 //
115 if (mPageTablePool == NULL) {
116 mPageTablePool = Buffer;
117 mPageTablePool->NextPool = mPageTablePool;
118 } else {
119 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
120 mPageTablePool->NextPool = Buffer;
121 mPageTablePool = Buffer;
122 }
123
124 //
125 // Reserve one page for pool header.
126 //
127 mPageTablePool->FreePages = PoolPages - 1;
128 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
129
130 return TRUE;
131 }
132
133 /**
134 This API provides a way to allocate memory for page table.
135
136 This API can be called more than once to allocate memory for page tables.
137
138 Allocates the number of 4KB pages and returns a pointer to the allocated
139 buffer. The buffer returned is aligned on a 4KB boundary.
140
141 If Pages is 0, then NULL is returned.
142 If there is not enough memory remaining to satisfy the request, then NULL is
143 returned.
144
145 @param Pages The number of 4 KB pages to allocate.
146
147 @return A pointer to the allocated buffer or NULL if allocation fails.
148
149 **/
150 STATIC
151 VOID *
152 EFIAPI
153 AllocatePageTableMemory (
154 IN UINTN Pages
155 )
156 {
157 VOID *Buffer;
158
159 if (Pages == 0) {
160 return NULL;
161 }
162
163 //
164 // Renew the pool if necessary.
165 //
166 if ((mPageTablePool == NULL) ||
167 (Pages > mPageTablePool->FreePages))
168 {
169 if (!InitializePageTablePool (Pages)) {
170 return NULL;
171 }
172 }
173
174 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
175
176 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
177 mPageTablePool->FreePages -= Pages;
178
179 DEBUG ((
180 DEBUG_VERBOSE,
181 "%a:%a: Buffer=0x%Lx Pages=%ld\n",
182 gEfiCallerBaseName,
183 __FUNCTION__,
184 Buffer,
185 Pages
186 ));
187
188 return Buffer;
189 }
190
191 /**
192 Split 2M page to 4K.
193
194 @param[in] PhysicalAddress Start physical address the 2M page
195 covered.
196 @param[in, out] PageEntry2M Pointer to 2M page entry.
197 @param[in] StackBase Stack base address.
198 @param[in] StackSize Stack size.
199
200 **/
201 STATIC
202 VOID
203 Split2MPageTo4K (
204 IN PHYSICAL_ADDRESS PhysicalAddress,
205 IN OUT UINT64 *PageEntry2M,
206 IN PHYSICAL_ADDRESS StackBase,
207 IN UINTN StackSize,
208 IN UINT64 AddressEncMask
209 )
210 {
211 PHYSICAL_ADDRESS PhysicalAddress4K;
212 UINTN IndexOfPageTableEntries;
213 PAGE_TABLE_4K_ENTRY *PageTableEntry, *PageTableEntry1;
214
215 PageTableEntry = AllocatePageTableMemory (1);
216
217 PageTableEntry1 = PageTableEntry;
218
219 if (PageTableEntry == NULL) {
220 ASSERT (FALSE);
221 return;
222 }
223
224 PhysicalAddress4K = PhysicalAddress;
225 for (IndexOfPageTableEntries = 0;
226 IndexOfPageTableEntries < 512;
227 (IndexOfPageTableEntries++,
228 PageTableEntry++,
229 PhysicalAddress4K += SIZE_4KB))
230 {
231 //
232 // Fill in the Page Table entries
233 //
234 PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K | AddressEncMask;
235 PageTableEntry->Bits.ReadWrite = 1;
236 PageTableEntry->Bits.Present = 1;
237 if ((PhysicalAddress4K >= StackBase) &&
238 (PhysicalAddress4K < StackBase + StackSize))
239 {
240 //
241 // Set Nx bit for stack.
242 //
243 PageTableEntry->Bits.Nx = 1;
244 }
245 }
246
247 //
248 // Fill in 2M page entry.
249 //
250 *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
251 IA32_PG_P | IA32_PG_RW | AddressEncMask);
252 }
253
/**
  Set one page of page table pool memory to be read-only.

  Walks the paging hierarchy for Address and clears the R/W bit on the
  entries that map one PAGE_TABLE_POOL_UNIT_SIZE-sized unit of the pool,
  splitting large pages into smaller ones where the mapping granularity is
  coarser than the pool unit.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  UINT64                ActiveAddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  if (PageTableBase == 0) {
    ASSERT (FALSE);
    return;
  }

  //
  // Since the page table is always from page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  //
  // Per-level lookup tables, indexed by paging level (1 = PTE .. 4 = PML4E).
  //
  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  // NOTE(review): LevelMask[4] reuses the 1G mask rather than a 512G mask —
  // presumably acceptable because level-4 entries are never large pages;
  // confirm against the original DxeIplPeim code this was derived from.
  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = GetMemEncryptionAddressMask () &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  //
  // Descend from the top paging level toward the leaf entry that maps
  // Address, stopping at the first large-page (PS) entry found.
  //
  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr             = PageTable[Index];
    ActiveAddressEncMask = GetMemEncryptionAddressMask () & PageAttr;

    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
          // one page (2MB). Then we don't need to update attributes for pages
          // crossing page directory. ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // The smaller granularity of page must be needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      if (NewPageTable == NULL) {
        ASSERT (FALSE);
        return;
      }

      //
      // Split the large page into 512 entries of the next-smaller size,
      // preserving the entry's shared-bit state (ActiveAddressEncMask).
      //
      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | ActiveAddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      //
      // Replace the large-page entry with a pointer to the new sub-table,
      // then continue the walk inside it at the next lower level.
      //
      PageTable[Index] = (UINT64)(UINTN)NewPageTable | ActiveAddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}
386
387 /**
388 Prevent the memory pages used for page table from been overwritten.
389
390 @param[in] PageTableBase Base address of page table (CR3).
391 @param[in] Level4Paging Level 4 paging flag.
392
393 **/
394 STATIC
395 VOID
396 EnablePageTableProtection (
397 IN UINTN PageTableBase,
398 IN BOOLEAN Level4Paging
399 )
400 {
401 PAGE_TABLE_POOL *HeadPool;
402 PAGE_TABLE_POOL *Pool;
403 UINT64 PoolSize;
404 EFI_PHYSICAL_ADDRESS Address;
405
406 if (mPageTablePool == NULL) {
407 return;
408 }
409
410 //
411 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
412 // remember original one in advance.
413 //
414 HeadPool = mPageTablePool;
415 Pool = HeadPool;
416 do {
417 Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
418 PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
419
420 //
421 // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE,
422 // which is one of page size of the processor (2MB by default). Let's apply
423 // the protection to them one by one.
424 //
425 while (PoolSize > 0) {
426 SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
427 Address += PAGE_TABLE_POOL_UNIT_SIZE;
428 PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
429 }
430
431 Pool = Pool->NextPool;
432 } while (Pool != HeadPool);
433 }
434
435 /**
436 Split 1G page to 2M.
437
438 @param[in] PhysicalAddress Start physical address the 1G page
439 covered.
440 @param[in, out] PageEntry1G Pointer to 1G page entry.
441 @param[in] StackBase Stack base address.
442 @param[in] StackSize Stack size.
443
444 **/
445 STATIC
446 VOID
447 Split1GPageTo2M (
448 IN PHYSICAL_ADDRESS PhysicalAddress,
449 IN OUT UINT64 *PageEntry1G,
450 IN PHYSICAL_ADDRESS StackBase,
451 IN UINTN StackSize
452 )
453 {
454 PHYSICAL_ADDRESS PhysicalAddress2M;
455 UINTN IndexOfPageDirectoryEntries;
456 PAGE_TABLE_ENTRY *PageDirectoryEntry;
457 UINT64 AddressEncMask;
458 UINT64 ActiveAddressEncMask;
459
460 PageDirectoryEntry = AllocatePageTableMemory (1);
461 if (PageDirectoryEntry == NULL) {
462 return;
463 }
464
465 AddressEncMask = GetMemEncryptionAddressMask ();
466 ASSERT (PageDirectoryEntry != NULL);
467
468 ActiveAddressEncMask = *PageEntry1G & AddressEncMask;
469 //
470 // Fill in 1G page entry.
471 //
472 *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
473 IA32_PG_P | IA32_PG_RW | ActiveAddressEncMask);
474
475 PhysicalAddress2M = PhysicalAddress;
476 for (IndexOfPageDirectoryEntries = 0;
477 IndexOfPageDirectoryEntries < 512;
478 (IndexOfPageDirectoryEntries++,
479 PageDirectoryEntry++,
480 PhysicalAddress2M += SIZE_2MB))
481 {
482 if ((PhysicalAddress2M < StackBase + StackSize) &&
483 ((PhysicalAddress2M + SIZE_2MB) > StackBase))
484 {
485 //
486 // Need to split this 2M page that covers stack range.
487 //
488 Split2MPageTo4K (
489 PhysicalAddress2M,
490 (UINT64 *)PageDirectoryEntry,
491 StackBase,
492 StackSize,
493 ActiveAddressEncMask
494 );
495 } else {
496 //
497 // Fill in the Page Directory entries
498 //
499 PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | ActiveAddressEncMask;
500 PageDirectoryEntry->Bits.ReadWrite = 1;
501 PageDirectoryEntry->Bits.Present = 1;
502 PageDirectoryEntry->Bits.MustBe1 = 1;
503 }
504 }
505 }
506
507 /**
508 Set or Clear the memory shared bit
509
510 @param[in] PagetablePoint Page table entry pointer (PTE).
511 @param[in] Mode Set or Clear shared bit
512
513 **/
514 STATIC VOID
515 SetOrClearSharedBit (
516 IN OUT UINT64 *PageTablePointer,
517 IN TDX_PAGETABLE_MODE Mode,
518 IN PHYSICAL_ADDRESS PhysicalAddress,
519 IN UINT64 Length
520 )
521 {
522 UINT64 AddressEncMask;
523 UINT64 Status;
524 EDKII_MEMORY_ACCEPT_PROTOCOL *MemoryAcceptProtocol;
525
526 AddressEncMask = GetMemEncryptionAddressMask ();
527
528 //
529 // Set or clear page table entry. Also, set shared bit in physical address, before calling MapGPA
530 //
531 if (Mode == SetSharedBit) {
532 *PageTablePointer |= AddressEncMask;
533 PhysicalAddress |= AddressEncMask;
534 } else {
535 *PageTablePointer &= ~AddressEncMask;
536 PhysicalAddress &= ~AddressEncMask;
537 }
538
539 Status = TdVmCall (TDVMCALL_MAPGPA, PhysicalAddress, Length, 0, 0, NULL);
540
541 //
542 // If changing shared to private, must accept-page again
543 //
544 if (Mode == ClearSharedBit) {
545 Status = gBS->LocateProtocol (&gEdkiiMemoryAcceptProtocolGuid, NULL, (VOID **)&MemoryAcceptProtocol);
546 ASSERT (!EFI_ERROR (Status));
547 Status = MemoryAcceptProtocol->AcceptMemory (MemoryAcceptProtocol, PhysicalAddress, Length);
548 ASSERT (!EFI_ERROR (Status));
549 }
550
551 DEBUG ((
552 DEBUG_VERBOSE,
553 "%a:%a: pte=0x%Lx AddressEncMask=0x%Lx Mode=0x%x MapGPA Status=0x%x\n",
554 gEfiCallerBaseName,
555 __FUNCTION__,
556 *PageTablePointer,
557 AddressEncMask,
558 Mode,
559 Status
560 ));
561 }
562
563 /**
564 Check the WP status in CR0 register. This bit is used to lock or unlock write
565 access to pages marked as read-only.
566
567 @retval TRUE Write protection is enabled.
568 @retval FALSE Write protection is disabled.
569 **/
570 STATIC
571 BOOLEAN
572 IsReadOnlyPageWriteProtected (
573 VOID
574 )
575 {
576 return ((AsmReadCr0 () & BIT16) != 0);
577 }
578
579 /**
580 Disable Write Protect on pages marked as read-only.
581 **/
582 STATIC
583 VOID
584 DisableReadOnlyPageWriteProtect (
585 VOID
586 )
587 {
588 AsmWriteCr0 (AsmReadCr0 () & ~BIT16);
589 }
590
591 /**
592 Enable Write Protect on pages marked as read-only.
593 **/
594 VOID
595 EnableReadOnlyPageWriteProtect (
596 VOID
597 )
598 {
599 AsmWriteCr0 (AsmReadCr0 () | BIT16);
600 }
601
/**
  This function either sets or clears memory encryption for the memory
  region specified by PhysicalAddress and Length from the current page table
  context.

  The function iterates through the PhysicalAddress one page at a time, and set
  or clears the memory encryption in the page table. If it encounters
  that a given physical address range is part of large page then it attempts to
  change the attribute at one go (based on size), otherwise it splits the
  large pages into smaller (e.g 2M page into 4K pages) and then try to set or
  clear the shared bit on the smallest page size.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region
  @param[in]  Mode                    Set or Clear mode

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encyrption attribute
                                      is not supported
**/
STATIC
RETURN_STATUS
EFIAPI
SetMemorySharedOrPrivate (
  IN    PHYSICAL_ADDRESS    Cr3BaseAddress,
  IN    PHYSICAL_ADDRESS    PhysicalAddress,
  IN    UINTN               Length,
  IN    TDX_PAGETABLE_MODE  Mode
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageUpperDirectoryPointerEntry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  PAGE_TABLE_ENTRY                *PageDirectory2MEntry;
  PAGE_TABLE_4K_ENTRY             *PageTableEntry;
  UINT64                          PgTableMask;
  UINT64                          AddressEncMask;
  UINT64                          ActiveEncMask;
  BOOLEAN                         IsWpEnabled;
  RETURN_STATUS                   Status;
  IA32_CR4                        Cr4;
  BOOLEAN                         Page5LevelSupport;

  //
  // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel4Entry = NULL;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length,
    (Mode == SetSharedBit) ? "Shared" : "Private"
    ));

  //
  // Check if we have a valid memory encryption mask
  //
  AddressEncMask = GetMemEncryptionAddressMask ();

  // Mask used to strip both the shared bit and the low attribute bits when
  // extracting table addresses from entries.
  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // Make sure that the page table is changeable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  //
  // If Cr3BaseAddress is not specified then read the current CR3
  //
  if (Cr3BaseAddress == 0) {
    Cr3BaseAddress = AsmReadCr3 ();
  }

  //
  // CPU will already have LA57 enabled so just check CR4
  //
  Cr4.UintN = AsmReadCr4 ();

  Page5LevelSupport = (Cr4.Bits.LA57 ? TRUE : FALSE);
  //
  // If 5-level pages, adjust Cr3BaseAddress to point to first 4-level page directory,
  // we will only have 1
  //
  if (Page5LevelSupport) {
    // NOTE(review): this dereferences entry 0 of the PML5 only, i.e. it
    // assumes the whole address space hangs off the first PML5 entry —
    // confirm this holds for all callers.
    Cr3BaseAddress = *(UINT64 *)Cr3BaseAddress & ~PgTableMask;
  }

  Status = EFI_SUCCESS;

  //
  // Walk the 4-level hierarchy for each chunk of the region, flipping the
  // shared bit at the largest granularity possible and splitting large
  // pages when the region is not aligned/sized for them.
  //
  while (Length) {
    PageMapLevel4Entry  = (VOID *)(Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID *)(
                                    (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
                                     12) & ~PgTableMask
                                    );
    PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PDPE for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    //
    // If the MustBe1 bit is not 1, it's not actually a 1GB entry
    //
    if (PageDirectory1GEntry->Bits.MustBe1) {
      //
      // Valid 1GB page
      // If we have at least 1GB to go, we can just update this entry
      //
      if (!(PhysicalAddress & (BIT30 - 1)) && (Length >= BIT30)) {
        SetOrClearSharedBit (&PageDirectory1GEntry->Uint64, Mode, PhysicalAddress, BIT30);
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        PhysicalAddress += BIT30;
        Length          -= BIT30;
      } else {
        //
        // We must split the page
        //
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Split1GPageTo2M (
          (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
          (UINT64 *)PageDirectory1GEntry,
          0,
          0
          );
        // Re-walk from the top; the entry now points at a 2M directory.
        continue;
      }
    } else {
      //
      // Actually a PDP
      //
      PageUpperDirectoryPointerEntry =
        (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
      PageDirectory2MEntry =
        (VOID *)(
                 (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                  12) & ~PgTableMask
                 );
      PageDirectory2MEntry += PDE_OFFSET (PhysicalAddress);
      if (!PageDirectory2MEntry->Bits.Present) {
        DEBUG ((
          DEBUG_ERROR,
          "%a:%a: bad PDE for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Status = RETURN_NO_MAPPING;
        goto Done;
      }

      //
      // If the MustBe1 bit is not a 1, it's not a 2MB entry
      //
      if (PageDirectory2MEntry->Bits.MustBe1) {
        //
        // Valid 2MB page
        // If we have at least 2MB left to go, we can just update this entry
        //
        if (!(PhysicalAddress & (BIT21-1)) && (Length >= BIT21)) {
          SetOrClearSharedBit (&PageDirectory2MEntry->Uint64, Mode, PhysicalAddress, BIT21);
          PhysicalAddress += BIT21;
          Length          -= BIT21;
        } else {
          //
          // We must split up this page into 4K pages
          //
          DEBUG ((
            DEBUG_VERBOSE,
            "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));

          // Preserve the current shared-bit state across the split.
          ActiveEncMask = PageDirectory2MEntry->Uint64 & AddressEncMask;

          Split2MPageTo4K (
            (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
            (UINT64 *)PageDirectory2MEntry,
            0,
            0,
            ActiveEncMask
            );
          // Re-walk from the top; the entry now points at a 4K page table.
          continue;
        }
      } else {
        PageDirectoryPointerEntry =
          (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
        PageTableEntry =
          (VOID *)(
                   (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                    12) & ~PgTableMask
                   );
        PageTableEntry += PTE_OFFSET (PhysicalAddress);
        if (!PageTableEntry->Bits.Present) {
          DEBUG ((
            DEBUG_ERROR,
            "%a:%a: bad PTE for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Status = RETURN_NO_MAPPING;
          goto Done;
        }

        SetOrClearSharedBit (&PageTableEntry->Uint64, Mode, PhysicalAddress, EFI_PAGE_SIZE);
        PhysicalAddress += EFI_PAGE_SIZE;
        Length          -= EFI_PAGE_SIZE;
      }
    }
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  if (IsWpEnabled) {
    // NOTE(review): this passes PageMapLevel4Entry (the last visited PML4
    // entry pointer), not Cr3BaseAddress, as the table base — presumably
    // intentional since both lie in the same 4K PML4 page; confirm against
    // the DxeIplPeim original.
    EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}
889
/**
  This function sets memory shared bit for the memory region specified by
  BaseAddress and NumPages from the current page table context.

  (Note: the previous comment said "clears" — a copy-paste swap with the
  Clear variant below; this wrapper passes SetSharedBit.)

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  BaseAddress             The physical address that is the start
                                      address of a memory region.
  @param[in]  NumPages                The number of pages from start memory
                                      region.

  @retval RETURN_SUCCESS              The attributes were set for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
MemEncryptTdxSetPageSharedBit (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN             NumPages
  )
{
  return SetMemorySharedOrPrivate (
           Cr3BaseAddress,
           BaseAddress,
           EFI_PAGES_TO_SIZE (NumPages),
           SetSharedBit
           );
}
922
/**
  This function clears memory shared bit for the memory region specified by
  BaseAddress and NumPages from the current page table context.

  (Note: the previous comment said "sets" — a copy-paste swap with the
  Set variant above; this wrapper passes ClearSharedBit, converting the
  region back to private.)

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  BaseAddress             The physical address that is the start
                                      address of a memory region.
  @param[in]  NumPages                The number of pages from start memory
                                      region.

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Clearing the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
MemEncryptTdxClearPageSharedBit (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN             NumPages
  )
{
  return SetMemorySharedOrPrivate (
           Cr3BaseAddress,
           BaseAddress,
           EFI_PAGES_TO_SIZE (NumPages),
           ClearSharedBit
           );
}