]> git.proxmox.com Git - mirror_edk2.git/blob - OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.c
4185874c99b8b614e67061665917433b959d0dc6
[mirror_edk2.git] / OvmfPkg / Library / BaseMemEncryptSevLib / X64 / VirtualMemory.c
1 /** @file
2
3 Virtual Memory Management Services to set or clear the memory encryption bit
4
5 Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
6 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
7
8 This program and the accompanying materials
9 are licensed and made available under the terms and conditions of the BSD License
10 which accompanies this distribution. The full text of the license may be found at
11 http://opensource.org/licenses/bsd-license.php
12
13 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
14 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
15
16 Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
17
18 **/
19
20 #include <Library/CpuLib.h>
21 #include <Register/Cpuid.h>
22 #include <Register/Amd/Cpuid.h>
23
24 #include "VirtualMemory.h"
25
//
// Cached memory-encryption mask state: mAddressEncMask is computed once from
// CPUID by GetMemEncryptionAddressMask() and reused; mAddressEncMaskChecked
// records whether the computation has been done.
//
STATIC BOOLEAN          mAddressEncMaskChecked = FALSE;
STATIC UINT64           mAddressEncMask;
//
// Current head of the circular list of page table pools created by
// InitializePageTablePool(); NULL until the first pool is allocated.
//
STATIC PAGE_TABLE_POOL  *mPageTablePool = NULL;

//
// Selects whether a mapping operation sets or clears the memory encryption
// (C) bit in the visited page table entries.
//
typedef enum {
   SetCBit,
   ClearCBit
} MAP_RANGE_MODE;
34
35 /**
36 Get the memory encryption mask
37
38 @param[out] EncryptionMask contains the pte mask.
39
40 **/
41 STATIC
42 UINT64
43 GetMemEncryptionAddressMask (
44 VOID
45 )
46 {
47 UINT64 EncryptionMask;
48 CPUID_MEMORY_ENCRYPTION_INFO_EBX Ebx;
49
50 if (mAddressEncMaskChecked) {
51 return mAddressEncMask;
52 }
53
54 //
55 // CPUID Fn8000_001F[EBX] Bit 0:5 (memory encryption bit position)
56 //
57 AsmCpuid (CPUID_MEMORY_ENCRYPTION_INFO, NULL, &Ebx.Uint32, NULL, NULL);
58 EncryptionMask = LShiftU64 (1, Ebx.Bits.PtePosBits);
59
60 mAddressEncMask = EncryptionMask & PAGING_1G_ADDRESS_MASK_64;
61 mAddressEncMaskChecked = TRUE;
62
63 return mAddressEncMask;
64 }
65
66 /**
67 Initialize a buffer pool for page table use only.
68
69 To reduce the potential split operation on page table, the pages reserved for
70 page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and
71 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
72 initialized with number of pages greater than or equal to the given PoolPages.
73
74 Once the pages in the pool are used up, this method should be called again to
75 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't happen
76 often in practice.
77
78 @param[in] PoolPages The least page number of the pool to be created.
79
80 @retval TRUE The pool is initialized successfully.
81 @retval FALSE The memory is out of resource.
82 **/
83 STATIC
84 BOOLEAN
85 InitializePageTablePool (
86 IN UINTN PoolPages
87 )
88 {
89 VOID *Buffer;
90
91 //
92 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
93 // header.
94 //
95 PoolPages += 1; // Add one page for header.
96 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
97 PAGE_TABLE_POOL_UNIT_PAGES;
98 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
99 if (Buffer == NULL) {
100 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
101 return FALSE;
102 }
103
104 //
105 // Link all pools into a list for easier track later.
106 //
107 if (mPageTablePool == NULL) {
108 mPageTablePool = Buffer;
109 mPageTablePool->NextPool = mPageTablePool;
110 } else {
111 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
112 mPageTablePool->NextPool = Buffer;
113 mPageTablePool = Buffer;
114 }
115
116 //
117 // Reserve one page for pool header.
118 //
119 mPageTablePool->FreePages = PoolPages - 1;
120 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
121
122 return TRUE;
123 }
124
125 /**
126 This API provides a way to allocate memory for page table.
127
128 This API can be called more than once to allocate memory for page tables.
129
130 Allocates the number of 4KB pages and returns a pointer to the allocated
131 buffer. The buffer returned is aligned on a 4KB boundary.
132
133 If Pages is 0, then NULL is returned.
134 If there is not enough memory remaining to satisfy the request, then NULL is
135 returned.
136
137 @param Pages The number of 4 KB pages to allocate.
138
139 @return A pointer to the allocated buffer or NULL if allocation fails.
140
141 **/
142 STATIC
143 VOID *
144 EFIAPI
145 AllocatePageTableMemory (
146 IN UINTN Pages
147 )
148 {
149 VOID *Buffer;
150
151 if (Pages == 0) {
152 return NULL;
153 }
154
155 //
156 // Renew the pool if necessary.
157 //
158 if (mPageTablePool == NULL ||
159 Pages > mPageTablePool->FreePages) {
160 if (!InitializePageTablePool (Pages)) {
161 return NULL;
162 }
163 }
164
165 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
166
167 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
168 mPageTablePool->FreePages -= Pages;
169
170 DEBUG ((
171 DEBUG_VERBOSE,
172 "%a:%a: Buffer=0x%Lx Pages=%ld\n",
173 gEfiCallerBaseName,
174 __FUNCTION__,
175 Buffer,
176 Pages
177 ));
178
179 return Buffer;
180 }
181
182
183 /**
184 Split 2M page to 4K.
185
186 @param[in] PhysicalAddress Start physical address the 2M page covered.
187 @param[in, out] PageEntry2M Pointer to 2M page entry.
188 @param[in] StackBase Stack base address.
189 @param[in] StackSize Stack size.
190
191 **/
192 STATIC
193 VOID
194 Split2MPageTo4K (
195 IN PHYSICAL_ADDRESS PhysicalAddress,
196 IN OUT UINT64 *PageEntry2M,
197 IN PHYSICAL_ADDRESS StackBase,
198 IN UINTN StackSize
199 )
200 {
201 PHYSICAL_ADDRESS PhysicalAddress4K;
202 UINTN IndexOfPageTableEntries;
203 PAGE_TABLE_4K_ENTRY *PageTableEntry, *PageTableEntry1;
204 UINT64 AddressEncMask;
205
206 PageTableEntry = AllocatePageTableMemory(1);
207
208 PageTableEntry1 = PageTableEntry;
209
210 AddressEncMask = GetMemEncryptionAddressMask ();
211
212 ASSERT (PageTableEntry != NULL);
213 ASSERT (*PageEntry2M & AddressEncMask);
214
215 PhysicalAddress4K = PhysicalAddress;
216 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
217 //
218 // Fill in the Page Table entries
219 //
220 PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
221 PageTableEntry->Bits.ReadWrite = 1;
222 PageTableEntry->Bits.Present = 1;
223 if ((PhysicalAddress4K >= StackBase) && (PhysicalAddress4K < StackBase + StackSize)) {
224 //
225 // Set Nx bit for stack.
226 //
227 PageTableEntry->Bits.Nx = 1;
228 }
229 }
230
231 //
232 // Fill in 2M page entry.
233 //
234 *PageEntry2M = (UINT64) (UINTN) PageTableEntry1 | IA32_PG_P | IA32_PG_RW | AddressEncMask;
235 }
236
/**
  Set one page of page table pool memory to be read-only.

  Walks the paging hierarchy rooted at PageTableBase down to the entry that
  maps Address (aligned down to a pool-unit boundary) and clears the R/W bit
  on the entries covering one PAGE_TABLE_POOL_UNIT_SIZE of memory, splitting
  large pages into smaller ones where the mapping granularity is coarser than
  the pool unit.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  //
  // Per-level lookup tables, indexed by paging level (1 = PT ... 4 = PML4):
  // shift to extract the table index, address mask, and the size mapped by
  // one entry at that level.
  //
  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask  = GetMemEncryptionAddressMask() &
                    PAGING_1G_ADDRESS_MASK_64;
  PageTable       = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize    = PAGE_TABLE_POOL_UNIT_SIZE;

  //
  // Walk from the top level down; each iteration either descends into the
  // next-level table, clears R/W at the current granularity, or splits the
  // current large page and retries at the finer level.
  //
  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      // NOTE(review): the C-bit is stripped (~AddressEncMask) before using
      // the entry as a pointer, since the mask is not part of the address.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
          // one page (2MB). Then we don't need to update attributes for pages
          // crossing page directory. ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize -= LevelSize[Level];

          ++Index;
        }
      }

      //
      // Whole pool unit handled at this level; the walk is complete.
      //
      break;

    } else {
      //
      // The smaller granularity of page must be needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      //
      // Split the large page: fill a new table whose 512 entries map the
      // same range at the next-smaller page size, preserving the C-bit.
      //
      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex) {
        NewPageTable[EntryIndex] = PhysicalAddress  | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          //
          // Entries below a PDPE still map large pages, so keep PS set.
          //
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }
        PhysicalAddress += LevelSize[Level - 1];
      }

      //
      // Replace the large-page entry with a pointer to the new table and
      // descend into it on the next iteration.
      //
      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}
359
360 /**
361 Prevent the memory pages used for page table from been overwritten.
362
363 @param[in] PageTableBase Base address of page table (CR3).
364 @param[in] Level4Paging Level 4 paging flag.
365
366 **/
367 STATIC
368 VOID
369 EnablePageTableProtection (
370 IN UINTN PageTableBase,
371 IN BOOLEAN Level4Paging
372 )
373 {
374 PAGE_TABLE_POOL *HeadPool;
375 PAGE_TABLE_POOL *Pool;
376 UINT64 PoolSize;
377 EFI_PHYSICAL_ADDRESS Address;
378
379 if (mPageTablePool == NULL) {
380 return;
381 }
382
383 //
384 // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
385 // remember original one in advance.
386 //
387 HeadPool = mPageTablePool;
388 Pool = HeadPool;
389 do {
390 Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
391 PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
392
393 //
394 // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE, which
395 // is one of page size of the processor (2MB by default). Let's apply the
396 // protection to them one by one.
397 //
398 while (PoolSize > 0) {
399 SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);
400 Address += PAGE_TABLE_POOL_UNIT_SIZE;
401 PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
402 }
403
404 Pool = Pool->NextPool;
405 } while (Pool != HeadPool);
406
407 }
408
409
410 /**
411 Split 1G page to 2M.
412
413 @param[in] PhysicalAddress Start physical address the 1G page covered.
414 @param[in, out] PageEntry1G Pointer to 1G page entry.
415 @param[in] StackBase Stack base address.
416 @param[in] StackSize Stack size.
417
418 **/
419 STATIC
420 VOID
421 Split1GPageTo2M (
422 IN PHYSICAL_ADDRESS PhysicalAddress,
423 IN OUT UINT64 *PageEntry1G,
424 IN PHYSICAL_ADDRESS StackBase,
425 IN UINTN StackSize
426 )
427 {
428 PHYSICAL_ADDRESS PhysicalAddress2M;
429 UINTN IndexOfPageDirectoryEntries;
430 PAGE_TABLE_ENTRY *PageDirectoryEntry;
431 UINT64 AddressEncMask;
432
433 PageDirectoryEntry = AllocatePageTableMemory(1);
434
435 AddressEncMask = GetMemEncryptionAddressMask ();
436 ASSERT (PageDirectoryEntry != NULL);
437 ASSERT (*PageEntry1G & GetMemEncryptionAddressMask ());
438 //
439 // Fill in 1G page entry.
440 //
441 *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | IA32_PG_P | IA32_PG_RW | AddressEncMask;
442
443 PhysicalAddress2M = PhysicalAddress;
444 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
445 if ((PhysicalAddress2M < StackBase + StackSize) && ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {
446 //
447 // Need to split this 2M page that covers stack range.
448 //
449 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
450 } else {
451 //
452 // Fill in the Page Directory entries
453 //
454 PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
455 PageDirectoryEntry->Bits.ReadWrite = 1;
456 PageDirectoryEntry->Bits.Present = 1;
457 PageDirectoryEntry->Bits.MustBe1 = 1;
458 }
459 }
460 }
461
462
463 /**
464 Set or Clear the memory encryption bit
465
466 @param[in] PagetablePoint Page table entry pointer (PTE).
467 @param[in] Mode Set or Clear encryption bit
468
469 **/
470 STATIC VOID
471 SetOrClearCBit(
472 IN OUT UINT64* PageTablePointer,
473 IN MAP_RANGE_MODE Mode
474 )
475 {
476 UINT64 AddressEncMask;
477
478 AddressEncMask = GetMemEncryptionAddressMask ();
479
480 if (Mode == SetCBit) {
481 *PageTablePointer |= AddressEncMask;
482 } else {
483 *PageTablePointer &= ~AddressEncMask;
484 }
485
486 }
487
488 /**
489 Check the WP status in CR0 register. This bit is used to lock or unlock write
490 access to pages marked as read-only.
491
492 @retval TRUE Write protection is enabled.
493 @retval FALSE Write protection is disabled.
494 **/
495 STATIC
496 BOOLEAN
497 IsReadOnlyPageWriteProtected (
498 VOID
499 )
500 {
501 return ((AsmReadCr0 () & BIT16) != 0);
502 }
503
504
505 /**
506 Disable Write Protect on pages marked as read-only.
507 **/
508 STATIC
509 VOID
510 DisableReadOnlyPageWriteProtect (
511 VOID
512 )
513 {
514 AsmWriteCr0 (AsmReadCr0() & ~BIT16);
515 }
516
517 /**
518 Enable Write Protect on pages marked as read-only.
519 **/
520 VOID
521 EnableReadOnlyPageWriteProtect (
522 VOID
523 )
524 {
525 AsmWriteCr0 (AsmReadCr0() | BIT16);
526 }
527
528
/**
  This function either sets or clears memory encryption bit for the memory region
  specified by PhysicalAddress and length from the current page table context.

  The function iterates through the physicalAddress one page at a time, and set
  or clears the memory encryption mask in the page table. If it encounters
  that a given physical address range is part of large page then it attempts to
  change the attribute at one go (based on size), otherwise it splits the
  large pages into smaller (e.g 2M page into 4K pages) and then try to set or
  clear the encryption bit on the smallest page size.

  @param[in]  Cr3BaseAddress          Page table base (CR3); if zero, the
                                      current CR3 value is read and used.
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region
  @param[in]  Mode                    Set or Clear mode
  @param[in]  CacheFlush              Flush the caches before applying the
                                      encryption mask

  @retval RETURN_SUCCESS              The attributes were set/cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Length is zero.
  @retval RETURN_ACCESS_DENIED        The memory encryption mask is zero
                                      (encryption not active).
  @retval RETURN_NO_MAPPING           A paging-structure entry covering part
                                      of the region is not present.
**/

STATIC
RETURN_STATUS
EFIAPI
SetMemoryEncDec (
  IN    PHYSICAL_ADDRESS         Cr3BaseAddress,
  IN    PHYSICAL_ADDRESS         PhysicalAddress,
  IN    UINTN                    Length,
  IN    MAP_RANGE_MODE           Mode,
  IN    BOOLEAN                  CacheFlush
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER *PageUpperDirectoryPointerEntry;
  PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;
  PAGE_TABLE_1G_ENTRY            *PageDirectory1GEntry;
  PAGE_TABLE_ENTRY               *PageDirectory2MEntry;
  PAGE_TABLE_4K_ENTRY            *PageTableEntry;
  UINT64                         PgTableMask;
  UINT64                         AddressEncMask;
  BOOLEAN                        IsWpEnabled;
  RETURN_STATUS                  Status;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a CacheFlush=%u\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length,
    (Mode == SetCBit) ? "Encrypt" : "Decrypt",
    (UINT32)CacheFlush
    ));

  //
  // Check if we have a valid memory encryption mask
  //
  AddressEncMask = GetMemEncryptionAddressMask ();
  if (!AddressEncMask) {
    return RETURN_ACCESS_DENIED;
  }

  //
  // Mask used to strip both the C-bit and the low page-offset bits when
  // converting a paging-structure entry into a table pointer.
  //
  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // We are going to change the memory encryption attribute from C=0 -> C=1 or
  // vice versa Flush the caches to ensure that data is written into memory with
  // correct C-bit
  //
  if (CacheFlush) {
    WriteBackInvalidateDataCacheRange((VOID*) (UINTN)PhysicalAddress, Length);
  }

  //
  // Make sure that the page table is changeable: temporarily clear CR0.WP
  // if it is set, and remember to restore it on exit.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  Status = EFI_SUCCESS;

  //
  // Walk the page tables for each chunk of the region, updating the C-bit
  // at the largest page size that fits and splitting large pages otherwise.
  //
  while (Length)
  {
    //
    // If Cr3BaseAddress is not specified then read the current CR3
    //
    if (Cr3BaseAddress == 0) {
      Cr3BaseAddress = AsmReadCr3();
    }

    PageMapLevel4Entry = (VOID*) (Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET(PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID*) ((PageMapLevel4Entry->Bits.PageTableBaseAddress<<12) & ~PgTableMask);
    PageDirectory1GEntry += PDP_OFFSET(PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PDPE for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    //
    // If the MustBe1 bit is not 1, it's not actually a 1GB entry
    //
    if (PageDirectory1GEntry->Bits.MustBe1) {
      //
      // Valid 1GB page
      // If we have at least 1GB to go, we can just update this entry
      //
      if (!(PhysicalAddress & (BIT30 - 1)) && Length >= BIT30) {
        SetOrClearCBit(&PageDirectory1GEntry->Uint64, Mode);
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        PhysicalAddress += BIT30;
        Length -= BIT30;
      } else {
        //
        // We must split the page
        //
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Split1GPageTo2M(((UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress)<<30, (UINT64*) PageDirectory1GEntry, 0, 0);
        //
        // Retry the same address, which will now resolve to a 2M entry.
        //
        continue;
      }
    } else {
      //
      // Actually a PDP
      //
      PageUpperDirectoryPointerEntry = (PAGE_MAP_AND_DIRECTORY_POINTER*) PageDirectory1GEntry;
      PageDirectory2MEntry = (VOID*) ((PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress<<12) & ~PgTableMask);
      PageDirectory2MEntry += PDE_OFFSET(PhysicalAddress);
      if (!PageDirectory2MEntry->Bits.Present) {
        DEBUG ((
          DEBUG_ERROR,
          "%a:%a: bad PDE for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Status = RETURN_NO_MAPPING;
        goto Done;
      }
      //
      // If the MustBe1 bit is not a 1, it's not a 2MB entry
      //
      if (PageDirectory2MEntry->Bits.MustBe1) {
        //
        // Valid 2MB page
        // If we have at least 2MB left to go, we can just update this entry
        //
        if (!(PhysicalAddress & (BIT21-1)) && Length >= BIT21) {
          SetOrClearCBit (&PageDirectory2MEntry->Uint64, Mode);
          PhysicalAddress += BIT21;
          Length -= BIT21;
        } else {
          //
          // We must split up this page into 4K pages
          //
          DEBUG ((
            DEBUG_VERBOSE,
            "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Split2MPageTo4K (((UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress) << 21, (UINT64*) PageDirectory2MEntry, 0, 0);
          //
          // Retry the same address at 4K granularity.
          //
          continue;
        }
      } else {
        //
        // 4K page: update the single PTE covering this address.
        //
        PageDirectoryPointerEntry = (PAGE_MAP_AND_DIRECTORY_POINTER*) PageDirectory2MEntry;
        PageTableEntry = (VOID*) (PageDirectoryPointerEntry->Bits.PageTableBaseAddress<<12 & ~PgTableMask);
        PageTableEntry += PTE_OFFSET(PhysicalAddress);
        if (!PageTableEntry->Bits.Present) {
          DEBUG ((
            DEBUG_ERROR,
            "%a:%a: bad PTE for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Status = RETURN_NO_MAPPING;
          goto Done;
        }
        SetOrClearCBit (&PageTableEntry->Uint64, Mode);
        PhysicalAddress += EFI_PAGE_SIZE;
        Length -= EFI_PAGE_SIZE;
      }
    }
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  // NOTE(review): PageMapLevel4Entry was advanced by PML4_OFFSET() inside the
  // loop, so the value passed here is an entry pointer within the PML4 page,
  // not the CR3 base itself — confirm this is the intended PageTableBase.
  //
  if (IsWpEnabled) {
    EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
  }

  //
  // Flush TLB so the new C-bit mappings take effect.
  //
  CpuFlushTlb();

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}
780
781 /**
782 This function clears memory encryption bit for the memory region specified by
783 PhysicalAddress and length from the current page table context.
784
785 @param[in] PhysicalAddress The physical address that is the start
786 address of a memory region.
787 @param[in] Length The length of memory region
788 @param[in] Flush Flush the caches before applying the
789 encryption mask
790
791 @retval RETURN_SUCCESS The attributes were cleared for the memory
792 region.
793 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
794 @retval RETURN_UNSUPPORTED Setting the memory encyrption attribute is
795 not supported
796 **/
797 RETURN_STATUS
798 EFIAPI
799 InternalMemEncryptSevSetMemoryDecrypted (
800 IN PHYSICAL_ADDRESS Cr3BaseAddress,
801 IN PHYSICAL_ADDRESS PhysicalAddress,
802 IN UINTN Length,
803 IN BOOLEAN Flush
804 )
805 {
806
807 return SetMemoryEncDec (Cr3BaseAddress, PhysicalAddress, Length, ClearCBit, Flush);
808 }
809
810 /**
811 This function sets memory encryption bit for the memory region specified by
812 PhysicalAddress and length from the current page table context.
813
814 @param[in] PhysicalAddress The physical address that is the start address
815 of a memory region.
816 @param[in] Length The length of memory region
817 @param[in] Flush Flush the caches before applying the
818 encryption mask
819
820 @retval RETURN_SUCCESS The attributes were cleared for the memory
821 region.
822 @retval RETURN_INVALID_PARAMETER Number of pages is zero.
823 @retval RETURN_UNSUPPORTED Setting the memory encyrption attribute is
824 not supported
825 **/
826 RETURN_STATUS
827 EFIAPI
828 InternalMemEncryptSevSetMemoryEncrypted (
829 IN PHYSICAL_ADDRESS Cr3BaseAddress,
830 IN PHYSICAL_ADDRESS PhysicalAddress,
831 IN UINTN Length,
832 IN BOOLEAN Flush
833 )
834 {
835 return SetMemoryEncDec (Cr3BaseAddress, PhysicalAddress, Length, SetCBit, Flush);
836 }