/*
 * OvmfPkg/Library/BaseMemEncryptTdxLib/MemoryEncryption.c
 * (mirrored from edk2; includes commit "OvmfPkg/BaseMemEncryptTdxLib:
 * Refactor error handle of SetOrClearSharedBit")
 */
1 /** @file
2
3 Virtual Memory Management Services to set or clear the memory encryption.
4
5 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
6 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7
8 SPDX-License-Identifier: BSD-2-Clause-Patent
9
10 Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
11
12 Note:
13 There a lot of duplicated codes for Page Table operations. These
14 codes should be moved to a common library (PageTablesLib) so that it is
15 more friendly for review and maintain. There is a new feature requirement
16 https://bugzilla.tianocore.org/show_bug.cgi?id=847 which is to implement
17 the library. After the lib is introduced, this file will be refactored.
18
19 **/
20
21 #include <Uefi.h>
22 #include <Uefi/UefiBaseType.h>
23 #include <Library/CpuLib.h>
24 #include <Library/BaseLib.h>
25 #include <Library/DebugLib.h>
26 #include <Library/MemEncryptTdxLib.h>
27 #include "VirtualMemory.h"
28 #include <IndustryStandard/Tdx.h>
29 #include <Library/TdxLib.h>
30 #include <Library/UefiBootServicesTableLib.h>
31 #include <Protocol/MemoryAccept.h>
32 #include <ConfidentialComputingGuestAttr.h>
33
//
// Operation selector for SetOrClearSharedBit(): either mark a page-table
// entry (and its GPA range) as shared with the VMM, or as guest-private.
//
typedef enum {
  SetSharedBit,
  ClearSharedBit
} TDX_PAGETABLE_MODE;
38
39 STATIC PAGE_TABLE_POOL *mPageTablePool = NULL;
40
41 /**
42 Returns boolean to indicate whether to indicate which, if any, memory encryption is enabled
43
44 @param[in] Type Bitmask of encryption technologies to check is enabled
45
46 @retval TRUE The encryption type(s) are enabled
47 @retval FALSE The encryption type(s) are not enabled
48 **/
49 BOOLEAN
50 EFIAPI
51 MemEncryptTdxIsEnabled (
52 VOID
53 )
54 {
55 return CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr));
56 }
57
58 /**
59 Get the memory encryption mask
60
61 @param[out] EncryptionMask contains the pte mask.
62
63 **/
64 STATIC
65 UINT64
66 GetMemEncryptionAddressMask (
67 VOID
68 )
69 {
70 return TdSharedPageMask ();
71 }
72
73 /**
74 Initialize a buffer pool for page table use only.
75
76 To reduce the potential split operation on page table, the pages reserved for
77 page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and
78 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
79 initialized with number of pages greater than or equal to the given
80 PoolPages.
81
82 Once the pages in the pool are used up, this method should be called again to
83 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't
84 happen often in practice.
85
86 @param[in] PoolPages The least page number of the pool to be created.
87
88 @retval TRUE The pool is initialized successfully.
89 @retval FALSE The memory is out of resource.
90 **/
91 STATIC
92 BOOLEAN
93 InitializePageTablePool (
94 IN UINTN PoolPages
95 )
96 {
97 VOID *Buffer;
98
99 //
100 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
101 // header.
102 //
103 PoolPages += 1; // Add one page for header.
104 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
105 PAGE_TABLE_POOL_UNIT_PAGES;
106 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
107 if (Buffer == NULL) {
108 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
109 return FALSE;
110 }
111
112 //
113 // Link all pools into a list for easier track later.
114 //
115 if (mPageTablePool == NULL) {
116 mPageTablePool = Buffer;
117 mPageTablePool->NextPool = mPageTablePool;
118 } else {
119 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
120 mPageTablePool->NextPool = Buffer;
121 mPageTablePool = Buffer;
122 }
123
124 //
125 // Reserve one page for pool header.
126 //
127 mPageTablePool->FreePages = PoolPages - 1;
128 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
129
130 return TRUE;
131 }
132
133 /**
134 This API provides a way to allocate memory for page table.
135
136 This API can be called more than once to allocate memory for page tables.
137
138 Allocates the number of 4KB pages and returns a pointer to the allocated
139 buffer. The buffer returned is aligned on a 4KB boundary.
140
141 If Pages is 0, then NULL is returned.
142 If there is not enough memory remaining to satisfy the request, then NULL is
143 returned.
144
145 @param Pages The number of 4 KB pages to allocate.
146
147 @return A pointer to the allocated buffer or NULL if allocation fails.
148
149 **/
150 STATIC
151 VOID *
152 EFIAPI
153 AllocatePageTableMemory (
154 IN UINTN Pages
155 )
156 {
157 VOID *Buffer;
158
159 if (Pages == 0) {
160 return NULL;
161 }
162
163 //
164 // Renew the pool if necessary.
165 //
166 if ((mPageTablePool == NULL) ||
167 (Pages > mPageTablePool->FreePages))
168 {
169 if (!InitializePageTablePool (Pages)) {
170 return NULL;
171 }
172 }
173
174 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
175
176 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
177 mPageTablePool->FreePages -= Pages;
178
179 DEBUG ((
180 DEBUG_VERBOSE,
181 "%a:%a: Buffer=0x%Lx Pages=%ld\n",
182 gEfiCallerBaseName,
183 __FUNCTION__,
184 Buffer,
185 Pages
186 ));
187
188 return Buffer;
189 }
190
191 /**
192 Split 2M page to 4K.
193
194 @param[in] PhysicalAddress Start physical address the 2M page
195 covered.
196 @param[in, out] PageEntry2M Pointer to 2M page entry.
197 @param[in] StackBase Stack base address.
198 @param[in] StackSize Stack size.
199
200 **/
201 STATIC
202 VOID
203 Split2MPageTo4K (
204 IN PHYSICAL_ADDRESS PhysicalAddress,
205 IN OUT UINT64 *PageEntry2M,
206 IN PHYSICAL_ADDRESS StackBase,
207 IN UINTN StackSize,
208 IN UINT64 AddressEncMask
209 )
210 {
211 PHYSICAL_ADDRESS PhysicalAddress4K;
212 UINTN IndexOfPageTableEntries;
213 PAGE_TABLE_4K_ENTRY *PageTableEntry, *PageTableEntry1;
214
215 PageTableEntry = AllocatePageTableMemory (1);
216
217 PageTableEntry1 = PageTableEntry;
218
219 if (PageTableEntry == NULL) {
220 ASSERT (FALSE);
221 return;
222 }
223
224 PhysicalAddress4K = PhysicalAddress;
225 for (IndexOfPageTableEntries = 0;
226 IndexOfPageTableEntries < 512;
227 (IndexOfPageTableEntries++,
228 PageTableEntry++,
229 PhysicalAddress4K += SIZE_4KB))
230 {
231 //
232 // Fill in the Page Table entries
233 //
234 PageTableEntry->Uint64 = (UINT64)PhysicalAddress4K | AddressEncMask;
235 PageTableEntry->Bits.ReadWrite = 1;
236 PageTableEntry->Bits.Present = 1;
237 if ((PhysicalAddress4K >= StackBase) &&
238 (PhysicalAddress4K < StackBase + StackSize))
239 {
240 //
241 // Set Nx bit for stack.
242 //
243 PageTableEntry->Bits.Nx = 1;
244 }
245 }
246
247 //
248 // Fill in 2M page entry.
249 //
250 *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
251 IA32_PG_P | IA32_PG_RW | AddressEncMask);
252 }
253
/**
  Set one page of page table pool memory to be read-only.

  Walks the paging hierarchy from PageTableBase down to the entry mapping
  Address, clearing the R/W bit on the pool-unit-sized region.  If the
  region is currently mapped by a page larger than the pool unit, that
  large page is first split by allocating a lower-level table.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN UINTN                 PageTableBase,
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  UINT64                ActiveAddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  if (PageTableBase == 0) {
    ASSERT (FALSE);
    return;
  }

  //
  // Since the page table is always from page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  //
  // Per-level lookup tables: address shift, physical-address mask, and
  // region size for paging levels 1 (4K) through 4 (512G).
  //
  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = GetMemEncryptionAddressMask () &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  //
  // Walk from the top paging level down toward the leaf that maps Address.
  //
  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr             = PageTable[Index];
    ActiveAddressEncMask = GetMemEncryptionAddressMask () & PageAttr;

    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
          // one page (2MB). Then we don't need to update attributes for pages
          // crossing page directory. ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // The smaller granularity of page must be needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      if (NewPageTable == NULL) {
        ASSERT (FALSE);
        return;
      }

      //
      // Split the large page: fill a new lower-level table with entries
      // covering the same physical range (preserving the shared bit), then
      // descend into it.
      //
      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | ActiveAddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | ActiveAddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}
386
387 /**
388 Prevent the memory pages used for page table from been overwritten.
389
390 @param[in] PageTableBase Base address of page table (CR3).
391 @param[in] Level4Paging Level 4 paging flag.
392
393 **/
394 STATIC
395 VOID
396 EnablePageTableProtection (
397 IN UINTN PageTableBase,
398 IN BOOLEAN Level4Paging
399 )
400 {
401 PAGE_TABLE_POOL *HeadPool;
402 PAGE_TABLE_POOL *Pool;
403 UINT64 PoolSize;
404 EFI_PHYSICAL_ADDRESS Address;
405
406 if (mPageTablePool == NULL) {
407 return;
408 }
409
410 //
411 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
412 // remember original one in advance.
413 //
414 HeadPool = mPageTablePool;
415 Pool = HeadPool;
416 do {
417 Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
418 PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
419
420 //
421 // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE,
422 // which is one of page size of the processor (2MB by default). Let's apply
423 // the protection to them one by one.
424 //
425 while (PoolSize > 0) {
426 SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
427 Address += PAGE_TABLE_POOL_UNIT_SIZE;
428 PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
429 }
430
431 Pool = Pool->NextPool;
432 } while (Pool != HeadPool);
433 }
434
435 /**
436 Split 1G page to 2M.
437
438 @param[in] PhysicalAddress Start physical address the 1G page
439 covered.
440 @param[in, out] PageEntry1G Pointer to 1G page entry.
441 @param[in] StackBase Stack base address.
442 @param[in] StackSize Stack size.
443
444 **/
445 STATIC
446 VOID
447 Split1GPageTo2M (
448 IN PHYSICAL_ADDRESS PhysicalAddress,
449 IN OUT UINT64 *PageEntry1G,
450 IN PHYSICAL_ADDRESS StackBase,
451 IN UINTN StackSize
452 )
453 {
454 PHYSICAL_ADDRESS PhysicalAddress2M;
455 UINTN IndexOfPageDirectoryEntries;
456 PAGE_TABLE_ENTRY *PageDirectoryEntry;
457 UINT64 AddressEncMask;
458 UINT64 ActiveAddressEncMask;
459
460 PageDirectoryEntry = AllocatePageTableMemory (1);
461 if (PageDirectoryEntry == NULL) {
462 return;
463 }
464
465 AddressEncMask = GetMemEncryptionAddressMask ();
466 ASSERT (PageDirectoryEntry != NULL);
467
468 ActiveAddressEncMask = *PageEntry1G & AddressEncMask;
469 //
470 // Fill in 1G page entry.
471 //
472 *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
473 IA32_PG_P | IA32_PG_RW | ActiveAddressEncMask);
474
475 PhysicalAddress2M = PhysicalAddress;
476 for (IndexOfPageDirectoryEntries = 0;
477 IndexOfPageDirectoryEntries < 512;
478 (IndexOfPageDirectoryEntries++,
479 PageDirectoryEntry++,
480 PhysicalAddress2M += SIZE_2MB))
481 {
482 if ((PhysicalAddress2M < StackBase + StackSize) &&
483 ((PhysicalAddress2M + SIZE_2MB) > StackBase))
484 {
485 //
486 // Need to split this 2M page that covers stack range.
487 //
488 Split2MPageTo4K (
489 PhysicalAddress2M,
490 (UINT64 *)PageDirectoryEntry,
491 StackBase,
492 StackSize,
493 ActiveAddressEncMask
494 );
495 } else {
496 //
497 // Fill in the Page Directory entries
498 //
499 PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | ActiveAddressEncMask;
500 PageDirectoryEntry->Bits.ReadWrite = 1;
501 PageDirectoryEntry->Bits.Present = 1;
502 PageDirectoryEntry->Bits.MustBe1 = 1;
503 }
504 }
505 }
506
507 /**
508 Set or Clear the memory shared bit
509
510 @param[in] PagetablePoint Page table entry pointer (PTE).
511 @param[in] Mode Set or Clear shared bit
512
513 @retval EFI_SUCCESS Successfully set or clear the memory shared bit
514 @retval Others Other error as indicated
515 **/
516 STATIC
517 EFI_STATUS
518 SetOrClearSharedBit (
519 IN OUT UINT64 *PageTablePointer,
520 IN TDX_PAGETABLE_MODE Mode,
521 IN PHYSICAL_ADDRESS PhysicalAddress,
522 IN UINT64 Length
523 )
524 {
525 UINT64 AddressEncMask;
526 UINT64 TdStatus;
527 EFI_STATUS Status;
528 EDKII_MEMORY_ACCEPT_PROTOCOL *MemoryAcceptProtocol;
529
530 AddressEncMask = GetMemEncryptionAddressMask ();
531
532 //
533 // Set or clear page table entry. Also, set shared bit in physical address, before calling MapGPA
534 //
535 if (Mode == SetSharedBit) {
536 *PageTablePointer |= AddressEncMask;
537 PhysicalAddress |= AddressEncMask;
538 } else {
539 *PageTablePointer &= ~AddressEncMask;
540 PhysicalAddress &= ~AddressEncMask;
541 }
542
543 TdStatus = TdVmCall (TDVMCALL_MAPGPA, PhysicalAddress, Length, 0, 0, NULL);
544 if (TdStatus != 0) {
545 DEBUG ((DEBUG_ERROR, "%a: TdVmcall(MAPGPA) failed with %llx\n", __FUNCTION__, TdStatus));
546 ASSERT (FALSE);
547 return EFI_DEVICE_ERROR;
548 }
549
550 //
551 // If changing shared to private, must accept-page again
552 //
553 if (Mode == ClearSharedBit) {
554 Status = gBS->LocateProtocol (&gEdkiiMemoryAcceptProtocolGuid, NULL, (VOID **)&MemoryAcceptProtocol);
555 if (EFI_ERROR (Status)) {
556 DEBUG ((DEBUG_ERROR, "%a: Failed to locate MemoryAcceptProtocol with %r\n", __FUNCTION__, Status));
557 ASSERT (FALSE);
558 return Status;
559 }
560
561 Status = MemoryAcceptProtocol->AcceptMemory (MemoryAcceptProtocol, PhysicalAddress, Length);
562 if (EFI_ERROR (Status)) {
563 DEBUG ((DEBUG_ERROR, "%a: Failed to AcceptMemory with %r\n", __FUNCTION__, Status));
564 ASSERT (FALSE);
565 return Status;
566 }
567 }
568
569 DEBUG ((
570 DEBUG_VERBOSE,
571 "%a:%a: pte=0x%Lx AddressEncMask=0x%Lx Mode=0x%x MapGPA Status=0x%x\n",
572 gEfiCallerBaseName,
573 __FUNCTION__,
574 *PageTablePointer,
575 AddressEncMask,
576 Mode,
577 Status
578 ));
579
580 return EFI_SUCCESS;
581 }
582
583 /**
584 Check the WP status in CR0 register. This bit is used to lock or unlock write
585 access to pages marked as read-only.
586
587 @retval TRUE Write protection is enabled.
588 @retval FALSE Write protection is disabled.
589 **/
590 STATIC
591 BOOLEAN
592 IsReadOnlyPageWriteProtected (
593 VOID
594 )
595 {
596 return ((AsmReadCr0 () & BIT16) != 0);
597 }
598
599 /**
600 Disable Write Protect on pages marked as read-only.
601 **/
602 STATIC
603 VOID
604 DisableReadOnlyPageWriteProtect (
605 VOID
606 )
607 {
608 AsmWriteCr0 (AsmReadCr0 () & ~BIT16);
609 }
610
611 /**
612 Enable Write Protect on pages marked as read-only.
613 **/
614 VOID
615 EnableReadOnlyPageWriteProtect (
616 VOID
617 )
618 {
619 AsmWriteCr0 (AsmReadCr0 () | BIT16);
620 }
621
/**
  This function either sets or clears memory encryption for the memory
  region specified by PhysicalAddress and Length from the current page table
  context.

  The function iterates through the PhysicalAddress one page at a time, and set
  or clears the memory encryption in the page table. If it encounters
  that a given physical address range is part of large page then it attempts to
  change the attribute at one go (based on size), otherwise it splits the
  large pages into smaller (e.g 2M page into 4K pages) and then try to set or
  clear the shared bit on the smallest page size.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region
  @param[in]  Mode                    Set or Clear mode

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encyrption attribute
                                      is not supported
**/
STATIC
RETURN_STATUS
EFIAPI
SetMemorySharedOrPrivate (
  IN PHYSICAL_ADDRESS    Cr3BaseAddress,
  IN PHYSICAL_ADDRESS    PhysicalAddress,
  IN UINTN               Length,
  IN TDX_PAGETABLE_MODE  Mode
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageUpperDirectoryPointerEntry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  PAGE_TABLE_ENTRY                *PageDirectory2MEntry;
  PAGE_TABLE_4K_ENTRY             *PageTableEntry;
  UINT64                          PgTableMask;
  UINT64                          AddressEncMask;
  UINT64                          ActiveEncMask;
  BOOLEAN                         IsWpEnabled;
  RETURN_STATUS                   Status;
  IA32_CR4                        Cr4;
  BOOLEAN                         Page5LevelSupport;

  //
  // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel4Entry = NULL;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length,
    (Mode == SetSharedBit) ? "Shared" : "Private"
    ));

  //
  // Check if we have a valid memory encryption mask
  //
  AddressEncMask = GetMemEncryptionAddressMask ();

  //
  // Mask used to strip both the shared bit and page-offset bits when
  // extracting table addresses from entries.
  //
  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // Make sure that the page table is changeable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  //
  // If Cr3BaseAddress is not specified then read the current CR3
  //
  if (Cr3BaseAddress == 0) {
    Cr3BaseAddress = AsmReadCr3 ();
  }

  //
  // CPU will already have LA57 enabled so just check CR4
  //
  Cr4.UintN = AsmReadCr4 ();

  Page5LevelSupport = (Cr4.Bits.LA57 ? TRUE : FALSE);
  //
  // If 5-level pages, adjust Cr3BaseAddress to point to first 4-level page directory,
  // we will only have 1
  //
  if (Page5LevelSupport) {
    Cr3BaseAddress = *(UINT64 *)Cr3BaseAddress & ~PgTableMask;
  }

  Status = EFI_SUCCESS;

  //
  // Walk the page tables for each chunk of the region, converting at the
  // largest granularity possible and splitting large pages when the
  // region is not aligned/sized for them.
  //
  while (Length) {
    PageMapLevel4Entry  = (VOID *)(Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID *)(
                                    (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
                                     12) & ~PgTableMask
                                    );
    PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PDPE for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    //
    // If the MustBe1 bit is not 1, it's not actually a 1GB entry
    //
    if (PageDirectory1GEntry->Bits.MustBe1) {
      //
      // Valid 1GB page
      // If we have at least 1GB to go, we can just update this entry
      //
      if (!(PhysicalAddress & (BIT30 - 1)) && (Length >= BIT30)) {
        Status = SetOrClearSharedBit (&PageDirectory1GEntry->Uint64, Mode, PhysicalAddress, BIT30);
        if (EFI_ERROR (Status)) {
          goto Done;
        }

        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        PhysicalAddress += BIT30;
        Length          -= BIT30;
      } else {
        //
        // We must split the page
        //
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Split1GPageTo2M (
          (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
          (UINT64 *)PageDirectory1GEntry,
          0,
          0
          );
        //
        // Retry this address against the newly created 2M entries.
        //
        continue;
      }
    } else {
      //
      // Actually a PDP
      //
      PageUpperDirectoryPointerEntry =
        (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
      PageDirectory2MEntry =
        (VOID *)(
                 (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                  12) & ~PgTableMask
                 );
      PageDirectory2MEntry += PDE_OFFSET (PhysicalAddress);
      if (!PageDirectory2MEntry->Bits.Present) {
        DEBUG ((
          DEBUG_ERROR,
          "%a:%a: bad PDE for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Status = RETURN_NO_MAPPING;
        goto Done;
      }

      //
      // If the MustBe1 bit is not a 1, it's not a 2MB entry
      //
      if (PageDirectory2MEntry->Bits.MustBe1) {
        //
        // Valid 2MB page
        // If we have at least 2MB left to go, we can just update this entry
        //
        if (!(PhysicalAddress & (BIT21-1)) && (Length >= BIT21)) {
          Status = SetOrClearSharedBit (&PageDirectory2MEntry->Uint64, Mode, PhysicalAddress, BIT21);
          if (EFI_ERROR (Status)) {
            goto Done;
          }

          PhysicalAddress += BIT21;
          Length          -= BIT21;
        } else {
          //
          // We must split up this page into 4K pages
          //
          DEBUG ((
            DEBUG_VERBOSE,
            "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));

          //
          // Preserve the shared bit currently active in the 2M entry.
          //
          ActiveEncMask = PageDirectory2MEntry->Uint64 & AddressEncMask;

          Split2MPageTo4K (
            (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
            (UINT64 *)PageDirectory2MEntry,
            0,
            0,
            ActiveEncMask
            );
          continue;
        }
      } else {
        PageDirectoryPointerEntry =
          (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
        PageTableEntry =
          (VOID *)(
                   (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                    12) & ~PgTableMask
                   );
        PageTableEntry += PTE_OFFSET (PhysicalAddress);
        if (!PageTableEntry->Bits.Present) {
          DEBUG ((
            DEBUG_ERROR,
            "%a:%a: bad PTE for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Status = RETURN_NO_MAPPING;
          goto Done;
        }

        //
        // 4K leaf entry: convert one page at a time.
        //
        Status = SetOrClearSharedBit (&PageTableEntry->Uint64, Mode, PhysicalAddress, EFI_PAGE_SIZE);
        if (EFI_ERROR (Status)) {
          goto Done;
        }

        PhysicalAddress += EFI_PAGE_SIZE;
        Length          -= EFI_PAGE_SIZE;
      }
    }
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  if (IsWpEnabled) {
    EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}
921
922 /**
923 This function clears memory shared bit for the memory region specified by
924 BaseAddress and NumPages from the current page table context.
925
926 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
927 current CR3)
928 @param[in] BaseAddress The physical address that is the start
929 address of a memory region.
930 @param[in] NumPages The number of pages from start memory
931 region.
932
933 @retval RETURN_SUCCESS The attributes were cleared for the
934 memory region.
935 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
936 @retval RETURN_UNSUPPORTED Clearing the memory encryption attribute
937 is not supported
938 **/
939 RETURN_STATUS
940 EFIAPI
941 MemEncryptTdxSetPageSharedBit (
942 IN PHYSICAL_ADDRESS Cr3BaseAddress,
943 IN PHYSICAL_ADDRESS BaseAddress,
944 IN UINTN NumPages
945 )
946 {
947 return SetMemorySharedOrPrivate (
948 Cr3BaseAddress,
949 BaseAddress,
950 EFI_PAGES_TO_SIZE (NumPages),
951 SetSharedBit
952 );
953 }
954
955 /**
956 This function sets memory shared bit for the memory region specified by
957 BaseAddress and NumPages from the current page table context.
958
959 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
960 current CR3)
961 @param[in] BaseAddress The physical address that is the start
962 address of a memory region.
963 @param[in] NumPages The number of pages from start memory
964 region.
965
966 @retval RETURN_SUCCESS The attributes were set for the memory
967 region.
968 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
969 @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
970 is not supported
971 **/
972 RETURN_STATUS
973 EFIAPI
974 MemEncryptTdxClearPageSharedBit (
975 IN PHYSICAL_ADDRESS Cr3BaseAddress,
976 IN PHYSICAL_ADDRESS BaseAddress,
977 IN UINTN NumPages
978 )
979 {
980 return SetMemorySharedOrPrivate (
981 Cr3BaseAddress,
982 BaseAddress,
983 EFI_PAGES_TO_SIZE (NumPages),
984 ClearSharedBit
985 );
986 }