/** @file

  Virtual Memory Management Services to set or clear the memory encryption bit

  Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

  Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c

**/

#include <Library/CpuLib.h>
#include <Library/MemEncryptSevLib.h>
#include <Register/Amd/Cpuid.h>
#include <Register/Cpuid.h>

#include "VirtualMemory.h"

STATIC BOOLEAN          mAddressEncMaskChecked = FALSE;
STATIC UINT64           mAddressEncMask;
STATIC PAGE_TABLE_POOL  *mPageTablePool = NULL;

typedef enum {
  SetCBit,
  ClearCBit
} MAP_RANGE_MODE;

/**
  Get the memory encryption mask

  @return  The pte bit mask used for memory encryption.

**/
STATIC
UINT64
GetMemEncryptionAddressMask (
  VOID
  )
{
  UINT64  EncryptionMask;

  if (mAddressEncMaskChecked) {
    return mAddressEncMask;
  }

  EncryptionMask = MemEncryptSevGetEncryptionMask ();

  mAddressEncMask = EncryptionMask & PAGING_1G_ADDRESS_MASK_64;
  mAddressEncMaskChecked = TRUE;

  return mAddressEncMask;
}

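//
// Illustrative sketch only (not part of this library's control flow; the real
// derivation lives in MemEncryptSevGetEncryptionMask ()): on SEV hardware the
// encryption mask is typically derived from CPUID Fn8000_001F, where EBX[5:0]
// reports the C-bit position in the page table entry, e.g.:
//
//   CPUID_MEMORY_ENCRYPTION_INFO_EBX  Ebx;
//
//   AsmCpuid (CPUID_MEMORY_ENCRYPTION_INFO, NULL, &Ebx.Uint32, NULL, NULL);
//   EncryptionMask = LShiftU64 (1, Ebx.Bits.PtePosBits);
//
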
/**
  Initialize a buffer pool for page table use only.

  To reduce the potential for split operations on page tables, the pages
  reserved for page tables should be allocated in multiples of
  PAGE_TABLE_POOL_UNIT_PAGES and at the boundary of PAGE_TABLE_POOL_ALIGNMENT.
  So the page pool is always initialized with a number of pages greater than
  or equal to the given PoolPages.

  Once the pages in the pool are used up, this method should be called again to
  reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't
  happen often in practice.

  @param[in] PoolPages    The minimum number of pages for the pool to be
                          created.

  @retval TRUE    The pool is initialized successfully.
  @retval FALSE   The memory is out of resources.
**/
STATIC
BOOLEAN
InitializePageTablePool (
  IN UINTN  PoolPages
  )
{
  VOID *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // header.
  //
  PoolPages += 1;   // Add one page for header.
  PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
              PAGE_TABLE_POOL_UNIT_PAGES;
  Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool = Buffer;
    mPageTablePool = Buffer;
  }

  //
  // Reserve one page for pool header.
  //
  mPageTablePool->FreePages = PoolPages - 1;
  mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

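//
// Worked example (assuming the 2MB unit, i.e. PAGE_TABLE_POOL_UNIT_PAGES of
// 512, declared in this library's VirtualMemory.h): a request for
// PoolPages == 5 becomes 6 after adding the header page, which rounds up to
// ((6 - 1) / 512 + 1) * 512 == 512 pages. One page is then reserved for the
// pool header, leaving FreePages == 511 and Offset == 4096.
//
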
/**
  This API provides a way to allocate memory for page tables.

  This API can be called more than once to allocate memory for page tables.

  Allocates the requested number of 4KB pages and returns a pointer to the
  allocated buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param  Pages   The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
STATIC
VOID *
EFIAPI
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if (mPageTablePool == NULL ||
      Pages > mPageTablePool->FreePages) {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }

  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages -= Pages;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Buffer=0x%Lx Pages=%ld\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Buffer,
    Pages
    ));

  return Buffer;
}


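//
// Usage sketch (hypothetical caller; the real callers are the Split* helpers
// below): allocate one page for a new paging structure and treat allocation
// failure as fatal, exactly as those helpers do.
//
//   PAGE_TABLE_4K_ENTRY  *NewTable;
//
//   NewTable = AllocatePageTableMemory (1);
//   ASSERT (NewTable != NULL);
//
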
/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress       Start physical address covered by the
                                        2M page.
  @param[in, out] PageEntry2M           Pointer to 2M page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.

**/
STATIC
VOID
Split2MPageTo4K (
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64        *PageEntry2M,
  IN PHYSICAL_ADDRESS  StackBase,
  IN UINTN             StackSize
  )
{
  PHYSICAL_ADDRESS     PhysicalAddress4K;
  UINTN                IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY  *PageTableEntry;
  PAGE_TABLE_4K_ENTRY  *PageTableEntry1;
  UINT64               AddressEncMask;

  PageTableEntry = AllocatePageTableMemory(1);

  PageTableEntry1 = PageTableEntry;

  AddressEncMask = GetMemEncryptionAddressMask ();

  ASSERT (PageTableEntry != NULL);
  ASSERT (*PageEntry2M & AddressEncMask);

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0;
       IndexOfPageTableEntries < 512;
       (IndexOfPageTableEntries++,
        PageTableEntry++,
        PhysicalAddress4K += SIZE_4KB)) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;
    PageTableEntry->Bits.Present = 1;
    if ((PhysicalAddress4K >= StackBase) &&
        (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
                  IA32_PG_P | IA32_PG_RW | AddressEncMask);
}

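//
// Note on the split above: a 2MB region is exactly 512 4KB pages
// (2MB / 4KB == 512), so the loop fills every entry of the newly allocated
// page table; only the entries that fall inside
// [StackBase, StackBase + StackSize) additionally get the Nx bit.
//
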
/**
  Set one page of page table pool memory to be read-only.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN UINTN                 PageTableBase,
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from the page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = GetMemEncryptionAddressMask();
  PageTable = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit in
          // one page (2MB), so we don't need to update attributes for pages
          // crossing a page directory. The ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize -= LevelSize[Level];

          ++Index;
        }
      }

      break;

    } else {
      //
      // A smaller page granularity must be needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex) {
        NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }
        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}

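//
// Worked example for the walk above (assuming the 2MB
// PAGE_TABLE_POOL_UNIT_SIZE used by this library): if the pool unit is
// covered by a 1GB leaf, the else branch splits it into 512 2MB entries
// first; on the next iteration the 2MB leaf that maps the pool unit satisfies
// PoolUnitSize >= LevelSize[2], so exactly one R/W bit is cleared and the
// loop breaks.
//
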
/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
EnablePageTableProtection (
  IN UINTN    PageTableBase,
  IN BOOLEAN  Level4Paging
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool = HeadPool;
  do {
    Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of the page sizes supported by the processor (2MB by
    // default). Let's apply the protection to them one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);
      Address += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);

}


/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress       Start physical address covered by the
                                        1G page.
  @param[in, out] PageEntry1G           Pointer to 1G page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.

**/
STATIC
VOID
Split1GPageTo2M (
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT UINT64        *PageEntry1G,
  IN PHYSICAL_ADDRESS  StackBase,
  IN UINTN             StackSize
  )
{
  PHYSICAL_ADDRESS  PhysicalAddress2M;
  UINTN             IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY  *PageDirectoryEntry;
  UINT64            AddressEncMask;

  PageDirectoryEntry = AllocatePageTableMemory(1);

  AddressEncMask = GetMemEncryptionAddressMask ();
  ASSERT (PageDirectoryEntry != NULL);
  ASSERT (*PageEntry1G & AddressEncMask);
  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
                  IA32_PG_P | IA32_PG_RW | AddressEncMask);

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0;
       IndexOfPageDirectoryEntries < 512;
       (IndexOfPageDirectoryEntries++,
        PageDirectoryEntry++,
        PhysicalAddress2M += SIZE_2MB)) {
    if ((PhysicalAddress2M < StackBase + StackSize) &&
        ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {
      //
      // Need to split this 2M page that covers the stack range.
      //
      Split2MPageTo4K (
        PhysicalAddress2M,
        (UINT64 *)PageDirectoryEntry,
        StackBase,
        StackSize
        );
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present = 1;
      PageDirectoryEntry->Bits.MustBe1 = 1;
    }
  }
}


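//
// When SetMemoryEncDec () below needs to split large pages, it calls
// Split1GPageTo2M () and Split2MPageTo4K () with StackBase == 0 and
// StackSize == 0, so no extra Nx protection is applied on that path; the
// stack parameters only matter for callers that split the page covering
// their stack.
//
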
/**
  Set or Clear the memory encryption bit

  @param[in, out] PageTablePointer      Page table entry pointer (PTE).
  @param[in]      Mode                  Set or Clear encryption bit

**/
STATIC VOID
SetOrClearCBit(
  IN OUT UINT64*     PageTablePointer,
  IN MAP_RANGE_MODE  Mode
  )
{
  UINT64  AddressEncMask;

  AddressEncMask = GetMemEncryptionAddressMask ();

  if (Mode == SetCBit) {
    *PageTablePointer |= AddressEncMask;
  } else {
    *PageTablePointer &= ~AddressEncMask;
  }

}

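//
// Illustrative example (assuming a C-bit position of 47, a common value
// reported by SEV hardware): AddressEncMask is then 1ULL << 47, so SetCBit
// turns a PTE of 0x0000000012345063 into 0x0000800012345063 and ClearCBit
// reverses it; all other PTE bits are left untouched.
//
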
/**
  Check the WP status in the CR0 register. This bit is used to lock or unlock
  write access to pages marked as read-only.

  @retval TRUE    Write protection is enabled.
  @retval FALSE   Write protection is disabled.
**/
STATIC
BOOLEAN
IsReadOnlyPageWriteProtected (
  VOID
  )
{
  return ((AsmReadCr0 () & BIT16) != 0);
}


/**
  Disable Write Protect on pages marked as read-only.
**/
STATIC
VOID
DisableReadOnlyPageWriteProtect (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0() & ~BIT16);
}

/**
  Enable Write Protect on pages marked as read-only.
**/
VOID
EnableReadOnlyPageWriteProtect (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0() | BIT16);
}


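//
// These helpers are used as a save/disable/restore pattern around page table
// edits, as SetMemoryEncDec () does below; a minimal sketch of that pattern:
//
//   BOOLEAN  IsWpEnabled;
//
//   IsWpEnabled = IsReadOnlyPageWriteProtected ();
//   if (IsWpEnabled) {
//     DisableReadOnlyPageWriteProtect ();
//   }
//   ... modify page table entries ...
//   if (IsWpEnabled) {
//     EnableReadOnlyPageWriteProtect ();
//   }
//
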
/**
  This function either sets or clears the memory encryption bit for the memory
  region specified by PhysicalAddress and Length from the current page table
  context.

  The function iterates through the PhysicalAddress one page at a time, and
  sets or clears the memory encryption mask in the page table. If it
  encounters a physical address range that is part of a large page, it
  attempts to change the attribute in one go (based on size); otherwise it
  splits the large page into smaller pages (e.g. a 2M page into 4K pages) and
  then sets or clears the encryption bit on the smallest page size.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region
  @param[in]  Mode                    Set or Clear mode
  @param[in]  CacheFlush              Flush the caches before applying the
                                      encryption mask

  @retval RETURN_SUCCESS              The attributes were set or cleared for
                                      the memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encryption attribute
                                      is not supported
**/
STATIC
RETURN_STATUS
EFIAPI
SetMemoryEncDec (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN UINTN             Length,
  IN MAP_RANGE_MODE    Mode,
  IN BOOLEAN           CacheFlush
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageUpperDirectoryPointerEntry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  PAGE_TABLE_ENTRY                *PageDirectory2MEntry;
  PAGE_TABLE_4K_ENTRY             *PageTableEntry;
  UINT64                          PgTableMask;
  UINT64                          AddressEncMask;
  BOOLEAN                         IsWpEnabled;
  RETURN_STATUS                   Status;

  //
  // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel4Entry = NULL;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a CacheFlush=%u\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length,
    (Mode == SetCBit) ? "Encrypt" : "Decrypt",
    (UINT32)CacheFlush
    ));

  //
  // Check if we have a valid memory encryption mask
  //
  AddressEncMask = GetMemEncryptionAddressMask ();
  if (!AddressEncMask) {
    return RETURN_ACCESS_DENIED;
  }

  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // We are going to change the memory encryption attribute from C=0 -> C=1 or
  // vice versa. Flush the caches to ensure that data is written into memory
  // with the correct C-bit.
  //
  if (CacheFlush) {
    WriteBackInvalidateDataCacheRange((VOID*) (UINTN)PhysicalAddress, Length);
  }

  //
  // Make sure that the page table is changeable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  Status = EFI_SUCCESS;

  while (Length != 0)
  {
    //
    // If Cr3BaseAddress is not specified then read the current CR3
    //
    if (Cr3BaseAddress == 0) {
      Cr3BaseAddress = AsmReadCr3();
    }

    PageMapLevel4Entry = (VOID*) (Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET(PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID *)(
                             (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
                              12) & ~PgTableMask
                             );
    PageDirectory1GEntry += PDP_OFFSET(PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PDPE for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    //
    // If the MustBe1 bit is not 1, it's not actually a 1GB entry
    //
    if (PageDirectory1GEntry->Bits.MustBe1) {
      //
      // Valid 1GB page
      // If we have at least 1GB to go, we can just update this entry
      //
      if ((PhysicalAddress & (BIT30 - 1)) == 0 && Length >= BIT30) {
        SetOrClearCBit(&PageDirectory1GEntry->Uint64, Mode);
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        PhysicalAddress += BIT30;
        Length -= BIT30;
      } else {
        //
        // We must split the page
        //
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Split1GPageTo2M (
          (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
          (UINT64 *)PageDirectory1GEntry,
          0,
          0
          );
        continue;
      }
    } else {
      //
      // Actually a PDP
      //
      PageUpperDirectoryPointerEntry =
        (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
      PageDirectory2MEntry =
        (VOID *)(
          (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
           12) & ~PgTableMask
          );
      PageDirectory2MEntry += PDE_OFFSET(PhysicalAddress);
      if (!PageDirectory2MEntry->Bits.Present) {
        DEBUG ((
          DEBUG_ERROR,
          "%a:%a: bad PDE for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Status = RETURN_NO_MAPPING;
        goto Done;
      }
      //
      // If the MustBe1 bit is not a 1, it's not a 2MB entry
      //
      if (PageDirectory2MEntry->Bits.MustBe1) {
        //
        // Valid 2MB page
        // If we have at least 2MB left to go, we can just update this entry
        //
        if ((PhysicalAddress & (BIT21-1)) == 0 && Length >= BIT21) {
          SetOrClearCBit (&PageDirectory2MEntry->Uint64, Mode);
          PhysicalAddress += BIT21;
          Length -= BIT21;
        } else {
          //
          // We must split up this page into 4K pages
          //
          DEBUG ((
            DEBUG_VERBOSE,
            "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Split2MPageTo4K (
            (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
            (UINT64 *)PageDirectory2MEntry,
            0,
            0
            );
          continue;
        }
      } else {
        PageDirectoryPointerEntry =
          (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
        PageTableEntry =
          (VOID *)(
            (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
             12) & ~PgTableMask
            );
        PageTableEntry += PTE_OFFSET(PhysicalAddress);
        if (!PageTableEntry->Bits.Present) {
          DEBUG ((
            DEBUG_ERROR,
            "%a:%a: bad PTE for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Status = RETURN_NO_MAPPING;
          goto Done;
        }
        SetOrClearCBit (&PageTableEntry->Uint64, Mode);
        PhysicalAddress += EFI_PAGE_SIZE;
        Length -= EFI_PAGE_SIZE;
      }
    }
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  if (IsWpEnabled) {
    EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
  }

  //
  // Flush TLB
  //
  CpuFlushTlb();

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}

/**
  This function clears the memory encryption bit for the memory region
  specified by PhysicalAddress and Length from the current page table context.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region
  @param[in]  Flush                   Flush the caches before applying the
                                      encryption mask

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Clearing the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
InternalMemEncryptSevSetMemoryDecrypted (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN UINTN             Length,
  IN BOOLEAN           Flush
  )
{

  return SetMemoryEncDec (
           Cr3BaseAddress,
           PhysicalAddress,
           Length,
           ClearCBit,
           Flush
           );
}

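//
// Hypothetical usage sketch (the names below are illustrative, not from this
// library): a caller that wants to share a page-aligned buffer with the
// unencrypted hypervisor could clear the C-bit on it, passing zero to use the
// current CR3:
//
//   Status = InternalMemEncryptSevSetMemoryDecrypted (
//              0,
//              (PHYSICAL_ADDRESS)(UINTN)SharedBuffer,
//              EFI_PAGES_TO_SIZE (NumPages),
//              TRUE
//              );
//   ASSERT_RETURN_ERROR (Status);
//
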
858/**\r
859 This function sets memory encryption bit for the memory region specified by\r
68e60a38 860 PhysicalAddress and Length from the current page table context.\r
a1f22614 861\r
68e60a38
LE
862 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use\r
863 current CR3)\r
4bd6bf31
LE
864 @param[in] PhysicalAddress The physical address that is the start\r
865 address of a memory region.\r
a1f22614
BS
866 @param[in] Length The length of memory region\r
867 @param[in] Flush Flush the caches before applying the\r
868 encryption mask\r
869\r
68e60a38
LE
870 @retval RETURN_SUCCESS The attributes were set for the memory\r
871 region.\r
a1f22614 872 @retval RETURN_INVALID_PARAMETER Number of pages is zero.\r
4bd6bf31
LE
873 @retval RETURN_UNSUPPORTED Setting the memory encyrption attribute\r
874 is not supported\r
a1f22614
BS
875**/\r
876RETURN_STATUS\r
877EFIAPI\r
878InternalMemEncryptSevSetMemoryEncrypted (\r
879 IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
880 IN PHYSICAL_ADDRESS PhysicalAddress,\r
881 IN UINTN Length,\r
882 IN BOOLEAN Flush\r
883 )\r
884{\r
4bd6bf31
LE
885 return SetMemoryEncDec (\r
886 Cr3BaseAddress,\r
887 PhysicalAddress,\r
888 Length,\r
889 SetCBit,\r
890 Flush\r
891 );\r
a1f22614 892}\r