]> git.proxmox.com Git - mirror_edk2.git/blame - OvmfPkg/Library/BaseMemEncryptTdxLib/MemoryEncryption.c
OvmfPkg/BaseMemEncryptTdxLib: Refactor error handle of SetOrClearSharedBit
[mirror_edk2.git] / OvmfPkg / Library / BaseMemEncryptTdxLib / MemoryEncryption.c
CommitLineData
5aa80186
MX
1/** @file\r
2\r
3 Virtual Memory Management Services to set or clear the memory encryption.\r
4\r
5 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r
6 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
7\r
8 SPDX-License-Identifier: BSD-2-Clause-Patent\r
9\r
10 Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c\r
11\r
12 Note:\r
13 There a lot of duplicated codes for Page Table operations. These\r
14 codes should be moved to a common library (PageTablesLib) so that it is\r
15 more friendly for review and maintain. There is a new feature requirement\r
16 https://bugzilla.tianocore.org/show_bug.cgi?id=847 which is to implement\r
17 the library. After the lib is introduced, this file will be refactored.\r
18\r
19**/\r
20\r
21#include <Uefi.h>\r
22#include <Uefi/UefiBaseType.h>\r
23#include <Library/CpuLib.h>\r
24#include <Library/BaseLib.h>\r
25#include <Library/DebugLib.h>\r
26#include <Library/MemEncryptTdxLib.h>\r
27#include "VirtualMemory.h"\r
28#include <IndustryStandard/Tdx.h>\r
29#include <Library/TdxLib.h>\r
720c25ab
MX
30#include <Library/UefiBootServicesTableLib.h>\r
31#include <Protocol/MemoryAccept.h>\r
5aa80186
MX
32#include <ConfidentialComputingGuestAttr.h>\r
33\r
//
// Selects whether SetOrClearSharedBit () should set or clear the TDX
// shared bit in a page table entry (and in the GPA passed to MapGPA).
//
typedef enum {
  SetSharedBit,
  ClearSharedBit
} TDX_PAGETABLE_MODE;
38\r
39STATIC PAGE_TABLE_POOL *mPageTablePool = NULL;\r
40\r
/**
  Returns a boolean to indicate whether TDX memory encryption (shared-bit
  management) is active for this guest.

  Determined from the ConfidentialComputingGuestAttr PCD; TRUE only when
  the guest type recorded there is TDX.

  @retval TRUE   Memory encryption (TDX) is enabled.
  @retval FALSE  Memory encryption (TDX) is not enabled.
**/
BOOLEAN
EFIAPI
MemEncryptTdxIsEnabled (
  VOID
  )
{
  return CC_GUEST_IS_TDX (PcdGet64 (PcdConfidentialComputingGuestAttr));
}
57\r
/**
  Get the memory encryption mask.

  @return  The page table entry bit mask that marks a page as shared,
           as reported by TdSharedPageMask ().
**/
STATIC
UINT64
GetMemEncryptionAddressMask (
  VOID
  )
{
  return TdSharedPageMask ();
}
72\r
73/**\r
74 Initialize a buffer pool for page table use only.\r
75\r
76 To reduce the potential split operation on page table, the pages reserved for\r
77 page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and\r
78 at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always\r
79 initialized with number of pages greater than or equal to the given\r
80 PoolPages.\r
81\r
82 Once the pages in the pool are used up, this method should be called again to\r
83 reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't\r
84 happen often in practice.\r
85\r
86 @param[in] PoolPages The least page number of the pool to be created.\r
87\r
88 @retval TRUE The pool is initialized successfully.\r
89 @retval FALSE The memory is out of resource.\r
90**/\r
91STATIC\r
92BOOLEAN\r
93InitializePageTablePool (\r
94 IN UINTN PoolPages\r
95 )\r
96{\r
97 VOID *Buffer;\r
98\r
99 //\r
100 // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for\r
101 // header.\r
102 //\r
103 PoolPages += 1; // Add one page for header.\r
104 PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *\r
105 PAGE_TABLE_POOL_UNIT_PAGES;\r
106 Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);\r
107 if (Buffer == NULL) {\r
108 DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));\r
109 return FALSE;\r
110 }\r
111\r
112 //\r
113 // Link all pools into a list for easier track later.\r
114 //\r
115 if (mPageTablePool == NULL) {\r
116 mPageTablePool = Buffer;\r
117 mPageTablePool->NextPool = mPageTablePool;\r
118 } else {\r
119 ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;\r
120 mPageTablePool->NextPool = Buffer;\r
121 mPageTablePool = Buffer;\r
122 }\r
123\r
124 //\r
125 // Reserve one page for pool header.\r
126 //\r
127 mPageTablePool->FreePages = PoolPages - 1;\r
128 mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);\r
129\r
130 return TRUE;\r
131}\r
132\r
133/**\r
134 This API provides a way to allocate memory for page table.\r
135\r
136 This API can be called more than once to allocate memory for page tables.\r
137\r
138 Allocates the number of 4KB pages and returns a pointer to the allocated\r
139 buffer. The buffer returned is aligned on a 4KB boundary.\r
140\r
141 If Pages is 0, then NULL is returned.\r
142 If there is not enough memory remaining to satisfy the request, then NULL is\r
143 returned.\r
144\r
145 @param Pages The number of 4 KB pages to allocate.\r
146\r
147 @return A pointer to the allocated buffer or NULL if allocation fails.\r
148\r
149**/\r
150STATIC\r
151VOID *\r
152EFIAPI\r
153AllocatePageTableMemory (\r
154 IN UINTN Pages\r
155 )\r
156{\r
157 VOID *Buffer;\r
158\r
159 if (Pages == 0) {\r
160 return NULL;\r
161 }\r
162\r
163 //\r
164 // Renew the pool if necessary.\r
165 //\r
166 if ((mPageTablePool == NULL) ||\r
167 (Pages > mPageTablePool->FreePages))\r
168 {\r
169 if (!InitializePageTablePool (Pages)) {\r
170 return NULL;\r
171 }\r
172 }\r
173\r
174 Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;\r
175\r
176 mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);\r
177 mPageTablePool->FreePages -= Pages;\r
178\r
179 DEBUG ((\r
180 DEBUG_VERBOSE,\r
181 "%a:%a: Buffer=0x%Lx Pages=%ld\n",\r
182 gEfiCallerBaseName,\r
183 __FUNCTION__,\r
184 Buffer,\r
185 Pages\r
186 ));\r
187\r
188 return Buffer;\r
189}\r
190\r
/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress       Start physical address the 2M page
                                        covered.
  @param[in, out] PageEntry2M           Pointer to 2M page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.
  @param[in]      AddressEncMask        Shared-bit mask to apply to every new
                                        4K entry and to the new 2M entry.

**/
STATIC
VOID
Split2MPageTo4K (
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64        *PageEntry2M,
  IN PHYSICAL_ADDRESS  StackBase,
  IN UINTN             StackSize,
  IN UINT64            AddressEncMask
  )
{
  PHYSICAL_ADDRESS     PhysicalAddress4K;
  UINTN                IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY  *PageTableEntry, *PageTableEntry1;

  PageTableEntry = AllocatePageTableMemory (1);

  //
  // Keep a copy of the table start; PageTableEntry is advanced in the loop.
  //
  PageTableEntry1 = PageTableEntry;

  // On allocation failure leave *PageEntry2M untouched (DEBUG builds assert).
  if (PageTableEntry == NULL) {
    ASSERT (FALSE);
    return;
  }

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0;
       IndexOfPageTableEntries < 512;
       (IndexOfPageTableEntries++,
        PageTableEntry++,
        PhysicalAddress4K += SIZE_4KB))
  {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64         = (UINT64)PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;
    PageTableEntry->Bits.Present   = 1;
    if ((PhysicalAddress4K >= StackBase) &&
        (PhysicalAddress4K < StackBase + StackSize))
    {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }

  //
  // Fill in 2M page entry: point it at the new 4K table (non-leaf entry).
  //
  *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
                  IA32_PG_P | IA32_PG_RW | AddressEncMask);
}
253\r
/**
  Set one page of page table pool memory to be read-only.

  Walks the paging hierarchy from PageTableBase down to the entry mapping
  Address, clearing R/W on the whole pool unit containing Address, splitting
  large pages into smaller ones on demand.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  UINT64                ActiveAddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  if (PageTableBase == 0) {
    ASSERT (FALSE);
    return;
  }

  //
  // Since the page table is always from page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  // NOTE(review): LevelMask[4] reuses the 1G mask rather than a 512G one;
  // level-4 entries are never leaf pages, so this path looks unused — confirm.
  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = GetMemEncryptionAddressMask () &
                   PAGING_1G_ADDRESS_MASK_64;
  PageTable    = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr             = PageTable[Index];
    // Preserve whatever shared bit this entry currently carries.
    ActiveAddressEncMask = GetMemEncryptionAddressMask () & PageAttr;

    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
          // one page (2MB). Then we don't need to update attributes for pages
          // crossing page directory. ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      // Entire pool unit handled at this level; nothing further to descend.
      break;
    } else {
      //
      // The smaller granularity of page must be needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      if (NewPageTable == NULL) {
        ASSERT (FALSE);
        return;
      }

      //
      // Split the large page into 512 next-smaller-level entries that map
      // the same range with the same shared bit, then descend into them.
      //
      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | ActiveAddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | ActiveAddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}
386\r
/**
  Prevent the memory pages used for page table from been overwritten.

  Iterates over every pool in the circular pool list and marks each
  PAGE_TABLE_POOL_UNIT_SIZE chunk read-only via SetPageTablePoolReadOnly().

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
EnablePageTableProtection (
  IN  UINTN    PageTableBase,
  IN  BOOLEAN  Level4Paging
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool     = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of page size of the processor (2MB by default). Let's apply
    // the protection to them one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
      Address  += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);
}
434\r
435/**\r
436 Split 1G page to 2M.\r
437\r
438 @param[in] PhysicalAddress Start physical address the 1G page\r
439 covered.\r
440 @param[in, out] PageEntry1G Pointer to 1G page entry.\r
441 @param[in] StackBase Stack base address.\r
442 @param[in] StackSize Stack size.\r
443\r
444**/\r
445STATIC\r
446VOID\r
447Split1GPageTo2M (\r
448 IN PHYSICAL_ADDRESS PhysicalAddress,\r
449 IN OUT UINT64 *PageEntry1G,\r
450 IN PHYSICAL_ADDRESS StackBase,\r
451 IN UINTN StackSize\r
452 )\r
453{\r
454 PHYSICAL_ADDRESS PhysicalAddress2M;\r
455 UINTN IndexOfPageDirectoryEntries;\r
456 PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
457 UINT64 AddressEncMask;\r
458 UINT64 ActiveAddressEncMask;\r
459\r
460 PageDirectoryEntry = AllocatePageTableMemory (1);\r
461 if (PageDirectoryEntry == NULL) {\r
462 return;\r
463 }\r
464\r
465 AddressEncMask = GetMemEncryptionAddressMask ();\r
466 ASSERT (PageDirectoryEntry != NULL);\r
467\r
468 ActiveAddressEncMask = *PageEntry1G & AddressEncMask;\r
469 //\r
470 // Fill in 1G page entry.\r
471 //\r
472 *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |\r
473 IA32_PG_P | IA32_PG_RW | ActiveAddressEncMask);\r
474\r
475 PhysicalAddress2M = PhysicalAddress;\r
476 for (IndexOfPageDirectoryEntries = 0;\r
477 IndexOfPageDirectoryEntries < 512;\r
478 (IndexOfPageDirectoryEntries++,\r
479 PageDirectoryEntry++,\r
480 PhysicalAddress2M += SIZE_2MB))\r
481 {\r
482 if ((PhysicalAddress2M < StackBase + StackSize) &&\r
483 ((PhysicalAddress2M + SIZE_2MB) > StackBase))\r
484 {\r
485 //\r
486 // Need to split this 2M page that covers stack range.\r
487 //\r
488 Split2MPageTo4K (\r
489 PhysicalAddress2M,\r
490 (UINT64 *)PageDirectoryEntry,\r
491 StackBase,\r
492 StackSize,\r
493 ActiveAddressEncMask\r
494 );\r
495 } else {\r
496 //\r
497 // Fill in the Page Directory entries\r
498 //\r
499 PageDirectoryEntry->Uint64 = (UINT64)PhysicalAddress2M | ActiveAddressEncMask;\r
500 PageDirectoryEntry->Bits.ReadWrite = 1;\r
501 PageDirectoryEntry->Bits.Present = 1;\r
502 PageDirectoryEntry->Bits.MustBe1 = 1;\r
503 }\r
504 }\r
505}\r
506\r
507/**\r
508 Set or Clear the memory shared bit\r
509\r
510 @param[in] PagetablePoint Page table entry pointer (PTE).\r
511 @param[in] Mode Set or Clear shared bit\r
512\r
5c7a6113
MX
513 @retval EFI_SUCCESS Successfully set or clear the memory shared bit\r
514 @retval Others Other error as indicated\r
5aa80186 515**/\r
5c7a6113
MX
516STATIC\r
517EFI_STATUS\r
5aa80186
MX
518SetOrClearSharedBit (\r
519 IN OUT UINT64 *PageTablePointer,\r
520 IN TDX_PAGETABLE_MODE Mode,\r
521 IN PHYSICAL_ADDRESS PhysicalAddress,\r
522 IN UINT64 Length\r
523 )\r
524{\r
720c25ab 525 UINT64 AddressEncMask;\r
5c7a6113
MX
526 UINT64 TdStatus;\r
527 EFI_STATUS Status;\r
720c25ab 528 EDKII_MEMORY_ACCEPT_PROTOCOL *MemoryAcceptProtocol;\r
5aa80186
MX
529\r
530 AddressEncMask = GetMemEncryptionAddressMask ();\r
531\r
532 //\r
533 // Set or clear page table entry. Also, set shared bit in physical address, before calling MapGPA\r
534 //\r
535 if (Mode == SetSharedBit) {\r
536 *PageTablePointer |= AddressEncMask;\r
537 PhysicalAddress |= AddressEncMask;\r
538 } else {\r
539 *PageTablePointer &= ~AddressEncMask;\r
540 PhysicalAddress &= ~AddressEncMask;\r
541 }\r
542\r
5c7a6113
MX
543 TdStatus = TdVmCall (TDVMCALL_MAPGPA, PhysicalAddress, Length, 0, 0, NULL);\r
544 if (TdStatus != 0) {\r
545 DEBUG ((DEBUG_ERROR, "%a: TdVmcall(MAPGPA) failed with %llx\n", __FUNCTION__, TdStatus));\r
546 ASSERT (FALSE);\r
547 return EFI_DEVICE_ERROR;\r
548 }\r
5aa80186
MX
549\r
550 //\r
551 // If changing shared to private, must accept-page again\r
552 //\r
553 if (Mode == ClearSharedBit) {\r
720c25ab 554 Status = gBS->LocateProtocol (&gEdkiiMemoryAcceptProtocolGuid, NULL, (VOID **)&MemoryAcceptProtocol);\r
5c7a6113
MX
555 if (EFI_ERROR (Status)) {\r
556 DEBUG ((DEBUG_ERROR, "%a: Failed to locate MemoryAcceptProtocol with %r\n", __FUNCTION__, Status));\r
557 ASSERT (FALSE);\r
558 return Status;\r
559 }\r
560\r
720c25ab 561 Status = MemoryAcceptProtocol->AcceptMemory (MemoryAcceptProtocol, PhysicalAddress, Length);\r
5c7a6113
MX
562 if (EFI_ERROR (Status)) {\r
563 DEBUG ((DEBUG_ERROR, "%a: Failed to AcceptMemory with %r\n", __FUNCTION__, Status));\r
564 ASSERT (FALSE);\r
565 return Status;\r
566 }\r
5aa80186
MX
567 }\r
568\r
569 DEBUG ((\r
570 DEBUG_VERBOSE,\r
571 "%a:%a: pte=0x%Lx AddressEncMask=0x%Lx Mode=0x%x MapGPA Status=0x%x\n",\r
572 gEfiCallerBaseName,\r
573 __FUNCTION__,\r
574 *PageTablePointer,\r
575 AddressEncMask,\r
576 Mode,\r
577 Status\r
578 ));\r
5c7a6113
MX
579\r
580 return EFI_SUCCESS;\r
5aa80186
MX
581}\r
582\r
583/**\r
584 Check the WP status in CR0 register. This bit is used to lock or unlock write\r
585 access to pages marked as read-only.\r
586\r
587 @retval TRUE Write protection is enabled.\r
588 @retval FALSE Write protection is disabled.\r
589**/\r
590STATIC\r
591BOOLEAN\r
592IsReadOnlyPageWriteProtected (\r
593 VOID\r
594 )\r
595{\r
596 return ((AsmReadCr0 () & BIT16) != 0);\r
597}\r
598\r
599/**\r
600 Disable Write Protect on pages marked as read-only.\r
601**/\r
602STATIC\r
603VOID\r
604DisableReadOnlyPageWriteProtect (\r
605 VOID\r
606 )\r
607{\r
608 AsmWriteCr0 (AsmReadCr0 () & ~BIT16);\r
609}\r
610\r
611/**\r
612 Enable Write Protect on pages marked as read-only.\r
613**/\r
614VOID\r
615EnableReadOnlyPageWriteProtect (\r
616 VOID\r
617 )\r
618{\r
619 AsmWriteCr0 (AsmReadCr0 () | BIT16);\r
620}\r
621\r
/**
  This function either sets or clears memory encryption for the memory
  region specified by PhysicalAddress and Length from the current page table
  context.

  The function iterates through the PhysicalAddress one page at a time, and set
  or clears the memory encryption in the page table. If it encounters
  that a given physical address range is part of large page then it attempts to
  change the attribute at one go (based on size), otherwise it splits the
  large pages into smaller (e.g 2M page into 4K pages) and then try to set or
  clear the shared bit on the smallest page size.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region
  @param[in]  Mode                    Set or Clear mode

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encryption attribute
                                      is not supported
**/
STATIC
RETURN_STATUS
EFIAPI
SetMemorySharedOrPrivate (
  IN    PHYSICAL_ADDRESS    Cr3BaseAddress,
  IN    PHYSICAL_ADDRESS    PhysicalAddress,
  IN    UINTN               Length,
  IN    TDX_PAGETABLE_MODE  Mode
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageUpperDirectoryPointerEntry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  PAGE_TABLE_ENTRY                *PageDirectory2MEntry;
  PAGE_TABLE_4K_ENTRY             *PageTableEntry;
  UINT64                          PgTableMask;
  UINT64                          AddressEncMask;
  UINT64                          ActiveEncMask;
  BOOLEAN                         IsWpEnabled;
  RETURN_STATUS                   Status;
  IA32_CR4                        Cr4;
  BOOLEAN                         Page5LevelSupport;

  //
  // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel4Entry = NULL;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length,
    (Mode == SetSharedBit) ? "Shared" : "Private"
    ));

  //
  // Check if we have a valid memory encryption mask
  //
  AddressEncMask = GetMemEncryptionAddressMask ();

  // Mask used to strip the shared bit and page-offset bits from entries.
  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // Make sure that the page table is changeable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  //
  // If Cr3BaseAddress is not specified then read the current CR3
  //
  if (Cr3BaseAddress == 0) {
    Cr3BaseAddress = AsmReadCr3 ();
  }

  //
  // CPU will already have LA57 enabled so just check CR4
  //
  Cr4.UintN = AsmReadCr4 ();

  Page5LevelSupport = (Cr4.Bits.LA57 ? TRUE : FALSE);
  //
  // If 5-level pages, adjust Cr3BaseAddress to point to first 4-level page directory,
  // we will only have 1
  //
  if (Page5LevelSupport) {
    Cr3BaseAddress = *(UINT64 *)Cr3BaseAddress & ~PgTableMask;
  }

  Status = EFI_SUCCESS;

  //
  // Walk the page tables from the top for each chunk of the region; re-walk
  // after any large-page split (the `continue`s below) so the new, smaller
  // mapping is picked up.
  //
  while (Length) {
    PageMapLevel4Entry  = (VOID *)(Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID *)(
                                    (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
                                     12) & ~PgTableMask
                                    );
    PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PDPE for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    //
    // If the MustBe1 bit is not 1, it's not actually a 1GB entry
    //
    if (PageDirectory1GEntry->Bits.MustBe1) {
      //
      // Valid 1GB page
      // If we have at least 1GB to go, we can just update this entry
      //
      if (!(PhysicalAddress & (BIT30 - 1)) && (Length >= BIT30)) {
        Status = SetOrClearSharedBit (&PageDirectory1GEntry->Uint64, Mode, PhysicalAddress, BIT30);
        if (EFI_ERROR (Status)) {
          goto Done;
        }

        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        PhysicalAddress += BIT30;
        Length          -= BIT30;
      } else {
        //
        // We must split the page
        //
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Split1GPageTo2M (
          (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
          (UINT64 *)PageDirectory1GEntry,
          0,
          0
          );
        continue;
      }
    } else {
      //
      // Actually a PDP
      //
      PageUpperDirectoryPointerEntry =
        (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
      PageDirectory2MEntry =
        (VOID *)(
                 (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                  12) & ~PgTableMask
                 );
      PageDirectory2MEntry += PDE_OFFSET (PhysicalAddress);
      if (!PageDirectory2MEntry->Bits.Present) {
        DEBUG ((
          DEBUG_ERROR,
          "%a:%a: bad PDE for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Status = RETURN_NO_MAPPING;
        goto Done;
      }

      //
      // If the MustBe1 bit is not a 1, it's not a 2MB entry
      //
      if (PageDirectory2MEntry->Bits.MustBe1) {
        //
        // Valid 2MB page
        // If we have at least 2MB left to go, we can just update this entry
        //
        if (!(PhysicalAddress & (BIT21-1)) && (Length >= BIT21)) {
          Status = SetOrClearSharedBit (&PageDirectory2MEntry->Uint64, Mode, PhysicalAddress, BIT21);
          if (EFI_ERROR (Status)) {
            goto Done;
          }

          PhysicalAddress += BIT21;
          Length          -= BIT21;
        } else {
          //
          // We must split up this page into 4K pages
          //
          DEBUG ((
            DEBUG_VERBOSE,
            "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));

          // Carry the entry's current shared bit into the new 4K entries.
          ActiveEncMask = PageDirectory2MEntry->Uint64 & AddressEncMask;

          Split2MPageTo4K (
            (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
            (UINT64 *)PageDirectory2MEntry,
            0,
            0,
            ActiveEncMask
            );
          continue;
        }
      } else {
        PageDirectoryPointerEntry =
          (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
        PageTableEntry =
          (VOID *)(
                   (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                    12) & ~PgTableMask
                   );
        PageTableEntry += PTE_OFFSET (PhysicalAddress);
        if (!PageTableEntry->Bits.Present) {
          DEBUG ((
            DEBUG_ERROR,
            "%a:%a: bad PTE for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Status = RETURN_NO_MAPPING;
          goto Done;
        }

        Status = SetOrClearSharedBit (&PageTableEntry->Uint64, Mode, PhysicalAddress, EFI_PAGE_SIZE);
        if (EFI_ERROR (Status)) {
          goto Done;
        }

        PhysicalAddress += EFI_PAGE_SIZE;
        Length          -= EFI_PAGE_SIZE;
      }
    }
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  // NOTE(review): PageMapLevel4Entry has been advanced by PML4_OFFSET above,
  // so this passes an entry address rather than the table base as CR3 —
  // looks intentional/benign since SetPageTablePoolReadOnly only needs a
  // pointer into the top-level table page, but confirm against upstream.
  //
  if (IsWpEnabled) {
    EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}
921\r
/**
  This function sets memory shared bit for the memory region specified by
  BaseAddress and NumPages from the current page table context.
  (The previous description said "clears" — it was swapped with the
  MemEncryptTdxClearPageSharedBit documentation.)

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  BaseAddress             The physical address that is the start
                                      address of a memory region.
  @param[in]  NumPages                The number of pages from start memory
                                      region.

  @retval RETURN_SUCCESS              The attributes were set for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
MemEncryptTdxSetPageSharedBit (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN             NumPages
  )
{
  return SetMemorySharedOrPrivate (
           Cr3BaseAddress,
           BaseAddress,
           EFI_PAGES_TO_SIZE (NumPages),
           SetSharedBit
           );
}
954\r
/**
  This function clears memory shared bit for the memory region specified by
  BaseAddress and NumPages from the current page table context.
  (The previous description said "sets" — it was swapped with the
  MemEncryptTdxSetPageSharedBit documentation.)

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  BaseAddress             The physical address that is the start
                                      address of a memory region.
  @param[in]  NumPages                The number of pages from start memory
                                      region.

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Clearing the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
MemEncryptTdxClearPageSharedBit (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  BaseAddress,
  IN UINTN             NumPages
  )
{
  return SetMemorySharedOrPrivate (
           Cr3BaseAddress,
           BaseAddress,
           EFI_PAGES_TO_SIZE (NumPages),
           ClearSharedBit
           );
}