/** @file

  Virtual Memory Management Services to set or clear the memory encryption.

  Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

  Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c

  Note:
  There is a lot of duplicated code for page table operations. This code
  should be moved to a common library (PageTablesLib) so that it is easier
  to review and maintain. There is a new feature request,
  https://bugzilla.tianocore.org/show_bug.cgi?id=847, to implement such a
  library. After the lib is introduced, this file will be refactored.

**/

#include <Uefi.h>
#include <Uefi/UefiBaseType.h>
#include <Library/CpuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/MemEncryptTdxLib.h>
#include "VirtualMemory.h"
#include <IndustryStandard/Tdx.h>
#include <Library/TdxLib.h>
#include <ConfidentialComputingGuestAttr.h>

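//
// Operation requested on a page table entry: map the page shared with the
// VMM (set the shared bit) or map it back private to the TD (clear the
// shared bit).
//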
typedef enum {
  SetSharedBit,
  ClearSharedBit
} TDX_PAGETABLE_MODE;

STATIC PAGE_TABLE_POOL  *mPageTablePool = NULL;

/**
  Returns a boolean to indicate whether TDX memory encryption
  (shared/private memory via the shared bit) is enabled.

  @retval TRUE            TDX memory encryption is enabled
  @retval FALSE           TDX memory encryption is not enabled
**/
BOOLEAN
EFIAPI
MemEncryptTdxIsEnabled (
  VOID
  )
{
  return CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr));
}

/**
  Get the memory encryption mask.

  @return The page table entry mask that represents the TDX shared bit.

**/
STATIC
UINT64
GetMemEncryptionAddressMask (
  VOID
  )
{
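  //
  // For TDX the "encryption mask" is the GPA shared bit; its position is
  // derived from the guest physical address width (GPAW) of the TD, and
  // TdSharedPageMask () returns the corresponding page table entry mask.
  //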
  return TdSharedPageMask ();
}

/**
  Initialize a buffer pool for page table use only.

  To reduce the potential for page table splits, the pages reserved for the
  page table are allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
  aligned to PAGE_TABLE_POOL_ALIGNMENT. The pool is therefore always
  initialized with a number of pages greater than or equal to the given
  PoolPages.

  Once the pages in the pool are used up, this method should be called again
  to reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. In practice this
  rarely happens.

  @param[in] PoolPages  The minimum number of pages the pool must provide.

  @retval TRUE    The pool was initialized successfully.
  @retval FALSE   Not enough memory was available to create the pool.
**/
STATIC
BOOLEAN
InitializePageTablePool (
  IN UINTN  PoolPages
  )
{
  VOID  *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // header.
  //
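  // For example, assuming the usual unit of 512 pages (one 2MB chunk): a
  // request for 5 pages becomes 6 after adding the header page and is then
  // rounded up to one full unit of 512 pages.
  //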
  PoolPages += 1;   // Add one page for header.
  PoolPages  = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
               PAGE_TABLE_POOL_UNIT_PAGES;
  Buffer     = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool           = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool              = Buffer;
    mPageTablePool                        = Buffer;
  }

  //
  // Reserve one page for pool header.
  //
  mPageTablePool->FreePages = PoolPages - 1;
  mPageTablePool->Offset    = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages and returns a pointer to the allocated
  buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param  Pages                 The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
STATIC
VOID *
EFIAPI
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if ((mPageTablePool == NULL) ||
      (Pages > mPageTablePool->FreePages))
  {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }
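
  //
  // Note that allocation is always served from the most recently created
  // pool; any free pages left over in an older pool are not reused.
  //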

  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset    += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages -= Pages;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Buffer=0x%Lx Pages=%ld\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Buffer,
    Pages
    ));

  return Buffer;
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress       Start physical address covered by the
                                        2M page.
  @param[in, out] PageEntry2M           Pointer to 2M page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.
  @param[in]      AddressEncMask        The shared bit mask (if any) to apply
                                        to the new page table entries.

**/
STATIC
VOID
Split2MPageTo4K (
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64        *PageEntry2M,
  IN PHYSICAL_ADDRESS  StackBase,
  IN UINTN             StackSize,
  IN UINT64            AddressEncMask
  )
{
  PHYSICAL_ADDRESS     PhysicalAddress4K;
  UINTN                IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY  *PageTableEntry, *PageTableEntry1;

  PageTableEntry = AllocatePageTableMemory (1);

  PageTableEntry1 = PageTableEntry;

  if (PageTableEntry == NULL) {
    ASSERT (FALSE);
    return;
  }

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0;
       IndexOfPageTableEntries < 512;
       (IndexOfPageTableEntries++,
        PageTableEntry++,
        PhysicalAddress4K += SIZE_4KB))
  {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64         = (UINT64)PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;
    PageTableEntry->Bits.Present   = 1;
    if ((PhysicalAddress4K >= StackBase) &&
        (PhysicalAddress4K < StackBase + StackSize))
    {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
                  IA32_PG_P | IA32_PG_RW | AddressEncMask);
}

/**
  Set one page of page table pool memory to be read-only.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  UINT64                ActiveAddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  if (PageTableBase == 0) {
    ASSERT (FALSE);
    return;
  }

  //
  // Since the page table is always from the page table pool, which is always
  // located at a PAGE_TABLE_POOL_ALIGNMENT boundary, we just need to set the
  // whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

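  //
  // Paging-structure geometry indexed by level:
  //   1 = PTE (4KB), 2 = PDE (2MB), 3 = PDPTE (1GB), 4 = PML4E (512GB).
  //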
  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = GetMemEncryptionAddressMask () &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr             = PageTable[Index];
    ActiveAddressEncMask = GetMemEncryptionAddressMask () & PageAttr;

    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit in
          // one page (2MB), so we don't need to update attributes for pages
          // crossing a page directory. The ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // A smaller page granularity is needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      if (NewPageTable == NULL) {
        ASSERT (FALSE);
        return;
      }

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | ActiveAddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | ActiveAddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable        = NewPageTable;
    }
  }
}

/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
EnablePageTableProtection (
  IN  UINTN    PageTableBase,
  IN  BOOLEAN  Level4Paging
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool     = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of the page sizes supported by the processor (2MB by
    // default). Apply the protection to the pool one unit at a time.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
      Address  += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress       Start physical address covered by the
                                        1G page.
  @param[in, out] PageEntry1G           Pointer to 1G page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.

**/
STATIC
VOID
Split1GPageTo2M (
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64        *PageEntry1G,
  IN PHYSICAL_ADDRESS  StackBase,
  IN UINTN             StackSize
  )
{
  PHYSICAL_ADDRESS  PhysicalAddress2M;
  UINTN             IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY  *PageDirectoryEntry;
  UINT64            AddressEncMask;
  UINT64            ActiveAddressEncMask;

  PageDirectoryEntry = AllocatePageTableMemory (1);
  if (PageDirectoryEntry == NULL) {
    ASSERT (FALSE);
    return;
  }

  AddressEncMask = GetMemEncryptionAddressMask ();

  ActiveAddressEncMask = *PageEntry1G & AddressEncMask;
  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
                  IA32_PG_P | IA32_PG_RW | ActiveAddressEncMask);

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0;
       IndexOfPageDirectoryEntries < 512;
       (IndexOfPageDirectoryEntries++,
        PageDirectoryEntry++,
        PhysicalAddress2M += SIZE_2MB))
  {
    if ((PhysicalAddress2M < StackBase + StackSize) &&
        ((PhysicalAddress2M + SIZE_2MB) > StackBase))
    {
      //
      // Need to split this 2M page that covers the stack range.
      //
      Split2MPageTo4K (
        PhysicalAddress2M,
        (UINT64 *)PageDirectoryEntry,
        StackBase,
        StackSize,
        ActiveAddressEncMask
        );
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64         = (UINT64)PhysicalAddress2M | ActiveAddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present   = 1;
      PageDirectoryEntry->Bits.MustBe1   = 1;
    }
  }
}

/**
  Set or Clear the memory shared bit.

  @param[in]  PageTablePointer   Page table entry pointer (PTE).
  @param[in]  Mode               Set or Clear the shared bit.
  @param[in]  PhysicalAddress    Physical address of the memory region.
  @param[in]  Length             Length of the memory region.

**/
STATIC VOID
SetOrClearSharedBit (
  IN OUT UINT64              *PageTablePointer,
  IN     TDX_PAGETABLE_MODE  Mode,
  IN     PHYSICAL_ADDRESS    PhysicalAddress,
  IN     UINT64              Length
  )
{
  UINT64  AddressEncMask;
  UINT64  Status;

  AddressEncMask = GetMemEncryptionAddressMask ();

  //
  // Set or clear the page table entry. Also set or clear the shared bit in
  // the physical address before calling MapGPA.
  //
  if (Mode == SetSharedBit) {
    *PageTablePointer |= AddressEncMask;
    PhysicalAddress   |= AddressEncMask;
  } else {
    *PageTablePointer &= ~AddressEncMask;
    PhysicalAddress   &= ~AddressEncMask;
  }

  Status = TdVmCall (TDVMCALL_MAPGPA, PhysicalAddress, Length, 0, 0, NULL);

  //
  // If changing shared to private, must accept-page again.
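  // Pages converted back to private by MapGPA are left unaccepted, so they
  // must be re-accepted below before the guest can access them again.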
  //
  if (Mode == ClearSharedBit) {
    TdAcceptPages (PhysicalAddress, Length / EFI_PAGE_SIZE, EFI_PAGE_SIZE);
  }

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: pte=0x%Lx AddressEncMask=0x%Lx Mode=0x%x MapGPA Status=0x%x\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    *PageTablePointer,
    AddressEncMask,
    Mode,
    Status
    ));
}

/**
  Check the WP status in CR0 register. This bit is used to lock or unlock write
  access to pages marked as read-only.

  @retval TRUE    Write protection is enabled.
  @retval FALSE   Write protection is disabled.
**/
STATIC
BOOLEAN
IsReadOnlyPageWriteProtected (
  VOID
  )
{
  return ((AsmReadCr0 () & BIT16) != 0);
}

/**
  Disable Write Protect on pages marked as read-only.
**/
STATIC
VOID
DisableReadOnlyPageWriteProtect (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0 () & ~BIT16);
}

/**
  Enable Write Protect on pages marked as read-only.
**/
VOID
EnableReadOnlyPageWriteProtect (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0 () | BIT16);
}

/**
  This function either sets or clears the memory encryption (shared) bit for
  the memory region specified by PhysicalAddress and Length from the current
  page table context.

  The function iterates through the PhysicalAddress one page at a time, and
  sets or clears the memory encryption in the page table. If it encounters
  a physical address range that is part of a large page, it attempts to
  change the attribute in one go (based on size); otherwise it splits the
  large page into smaller pages (e.g. a 2M page into 4K pages) and then sets
  or clears the shared bit on the smallest page size.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region
  @param[in]  Mode                    Set or Clear mode

  @retval RETURN_SUCCESS              The attributes were updated for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encryption attribute
                                      is not supported
**/
STATIC
RETURN_STATUS
EFIAPI
SetMemorySharedOrPrivate (
  IN  PHYSICAL_ADDRESS    Cr3BaseAddress,
  IN  PHYSICAL_ADDRESS    PhysicalAddress,
  IN  UINTN               Length,
  IN  TDX_PAGETABLE_MODE  Mode
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageUpperDirectoryPointerEntry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  PAGE_TABLE_ENTRY                *PageDirectory2MEntry;
  PAGE_TABLE_4K_ENTRY             *PageTableEntry;
  UINT64                          PgTableMask;
  UINT64                          AddressEncMask;
  UINT64                          ActiveEncMask;
  BOOLEAN                         IsWpEnabled;
  RETURN_STATUS                   Status;
  IA32_CR4                        Cr4;
  BOOLEAN                         Page5LevelSupport;

  //
  // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel4Entry = NULL;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length,
    (Mode == SetSharedBit) ? "Shared" : "Private"
    ));

  //
  // Check if we have a valid memory encryption mask
  //
  AddressEncMask = GetMemEncryptionAddressMask ();

  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // Make sure that the page table is changeable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  //
  // If Cr3BaseAddress is not specified then read the current CR3
  //
  if (Cr3BaseAddress == 0) {
    Cr3BaseAddress = AsmReadCr3 ();
  }

  //
  // The CPU will already have LA57 enabled if 5-level paging is in use, so
  // just check CR4.
  //
  Cr4.UintN = AsmReadCr4 ();

  Page5LevelSupport = (Cr4.Bits.LA57 ? TRUE : FALSE);
  //
  // If 5-level paging is enabled, adjust Cr3BaseAddress to point to the first
  // (and only) 4-level page directory referenced by the PML5 table.
  //
  if (Page5LevelSupport) {
    Cr3BaseAddress = *(UINT64 *)Cr3BaseAddress & ~PgTableMask;
  }

  Status = EFI_SUCCESS;

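  //
  // Walk the paging hierarchy for the requested range: flip the shared bit
  // directly on a 1GB or 2MB mapping when the remaining range is large
  // enough and suitably aligned, otherwise split the large page and update
  // the range with 4KB entries.
  //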
  while (Length) {
    PageMapLevel4Entry  = (VOID *)(Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID *)(
                                    (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
                                     12) & ~PgTableMask
                                    );
    PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PDPE for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    //
    // If the MustBe1 bit is not 1, it's not actually a 1GB entry
    //
    if (PageDirectory1GEntry->Bits.MustBe1) {
      //
      // Valid 1GB page
      // If we have at least 1GB to go, we can just update this entry
      //
      if (!(PhysicalAddress & (BIT30 - 1)) && (Length >= BIT30)) {
        SetOrClearSharedBit (&PageDirectory1GEntry->Uint64, Mode, PhysicalAddress, BIT30);
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        PhysicalAddress += BIT30;
        Length          -= BIT30;
      } else {
        //
        // We must split the page
        //
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Split1GPageTo2M (
          (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
          (UINT64 *)PageDirectory1GEntry,
          0,
          0
          );
        continue;
      }
    } else {
      //
      // Actually a PDP
      //
      PageUpperDirectoryPointerEntry =
        (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
      PageDirectory2MEntry =
        (VOID *)(
                 (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                  12) & ~PgTableMask
                 );
      PageDirectory2MEntry += PDE_OFFSET (PhysicalAddress);
      if (!PageDirectory2MEntry->Bits.Present) {
        DEBUG ((
          DEBUG_ERROR,
          "%a:%a: bad PDE for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Status = RETURN_NO_MAPPING;
        goto Done;
      }

      //
      // If the MustBe1 bit is not a 1, it's not a 2MB entry
      //
      if (PageDirectory2MEntry->Bits.MustBe1) {
        //
        // Valid 2MB page
        // If we have at least 2MB left to go, we can just update this entry
        //
        if (!(PhysicalAddress & (BIT21-1)) && (Length >= BIT21)) {
          SetOrClearSharedBit (&PageDirectory2MEntry->Uint64, Mode, PhysicalAddress, BIT21);
          PhysicalAddress += BIT21;
          Length          -= BIT21;
        } else {
          //
          // We must split up this page into 4K pages
          //
          DEBUG ((
            DEBUG_VERBOSE,
            "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));

          ActiveEncMask = PageDirectory2MEntry->Uint64 & AddressEncMask;

          Split2MPageTo4K (
            (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
            (UINT64 *)PageDirectory2MEntry,
            0,
            0,
            ActiveEncMask
            );
          continue;
        }
      } else {
        PageDirectoryPointerEntry =
          (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
        PageTableEntry =
          (VOID *)(
                   (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                    12) & ~PgTableMask
                   );
        PageTableEntry += PTE_OFFSET (PhysicalAddress);
        if (!PageTableEntry->Bits.Present) {
          DEBUG ((
            DEBUG_ERROR,
            "%a:%a: bad PTE for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Status = RETURN_NO_MAPPING;
          goto Done;
        }

        SetOrClearSharedBit (&PageTableEntry->Uint64, Mode, PhysicalAddress, EFI_PAGE_SIZE);
        PhysicalAddress += EFI_PAGE_SIZE;
        Length          -= EFI_PAGE_SIZE;
      }
    }
  }

  //
  // Protect the page table by marking the memory used for the page table
  // read-only.
  //
  if (IsWpEnabled) {
    EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}

/**
  This function sets the memory shared bit for the memory region specified by
  BaseAddress and NumPages from the current page table context.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  BaseAddress             The physical address that is the start
                                      address of a memory region.
  @param[in]  NumPages                The number of pages from start memory
                                      region.

  @retval RETURN_SUCCESS              The attributes were set for the memory
                                      region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
MemEncryptTdxSetPageSharedBit (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN             NumPages
  )
{
  return SetMemorySharedOrPrivate (
           Cr3BaseAddress,
           BaseAddress,
           EFI_PAGES_TO_SIZE (NumPages),
           SetSharedBit
           );
}

/**
  This function clears the memory shared bit for the memory region specified
  by BaseAddress and NumPages from the current page table context.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  BaseAddress             The physical address that is the start
                                      address of a memory region.
  @param[in]  NumPages                The number of pages from start memory
                                      region.

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Clearing the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
MemEncryptTdxClearPageSharedBit (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN             NumPages
  )
{
  return SetMemorySharedOrPrivate (
           Cr3BaseAddress,
           BaseAddress,
           EFI_PAGES_TO_SIZE (NumPages),
           ClearSharedBit
           );
}
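
//
// Usage sketch (illustrative only, not part of this library): a caller that
// needs to exchange data with the host VMM could convert a buffer to shared
// and later back to private roughly as follows. "Buffer" and "Pages" are
// hypothetical, and error handling is elided; passing 0 for Cr3BaseAddress
// means the current CR3 is used.
//
//   if (MemEncryptTdxIsEnabled ()) {
//     Status = MemEncryptTdxSetPageSharedBit (
//                0,
//                (PHYSICAL_ADDRESS)(UINTN)Buffer,
//                Pages
//                );
//     // ... share data with the VMM ...
//     Status = MemEncryptTdxClearPageSharedBit (
//                0,
//                (PHYSICAL_ADDRESS)(UINTN)Buffer,
//                Pages
//                );
//   }
//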