]> git.proxmox.com Git - mirror_edk2.git/blame - OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiDxeVirtualMemory.c
OvmfPkg/BaseMemEncryptSevLib: introduce MemEncryptSevClearMmioPageEncMask()
[mirror_edk2.git] / OvmfPkg / Library / BaseMemEncryptSevLib / X64 / PeiDxeVirtualMemory.c
CommitLineData
a1f22614
BS
1/** @file\r
2\r
3 Virtual Memory Management Services to set or clear the memory encryption bit\r
4\r
699a2c30 5 Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r
45388d04 6 Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>\r
a1f22614 7\r
b26f0cf9 8 SPDX-License-Identifier: BSD-2-Clause-Patent\r
a1f22614 9\r
4bd6bf31 10 Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c\r
a1f22614
BS
11\r
12**/\r
13\r
14#include <Library/CpuLib.h>\r
45388d04 15#include <Library/MemEncryptSevLib.h>\r
a1f22614 16#include <Register/Amd/Cpuid.h>\r
bd13ecf3 17#include <Register/Cpuid.h>\r
a1f22614
BS
18\r
19#include "VirtualMemory.h"\r
20\r
21STATIC BOOLEAN mAddressEncMaskChecked = FALSE;\r
22STATIC UINT64 mAddressEncMask;\r
b721aa74 23STATIC PAGE_TABLE_POOL *mPageTablePool = NULL;\r
a1f22614
BS
24\r
25typedef enum {\r
26 SetCBit,\r
27 ClearCBit\r
28} MAP_RANGE_MODE;\r
29\r
30/**\r
c330af02 31 Return the pagetable memory encryption mask.\r
a1f22614 32\r
c330af02 33 @return The pagetable memory encryption mask.\r
a1f22614
BS
34\r
35**/\r
a1f22614 36UINT64\r
c330af02
TL
37EFIAPI\r
38InternalGetMemEncryptionAddressMask (\r
a1f22614
BS
39 VOID\r
40 )\r
41{\r
42 UINT64 EncryptionMask;\r
a1f22614
BS
43\r
44 if (mAddressEncMaskChecked) {\r
45 return mAddressEncMask;\r
46 }\r
47\r
45388d04 48 EncryptionMask = MemEncryptSevGetEncryptionMask ();\r
a1f22614
BS
49\r
50 mAddressEncMask = EncryptionMask & PAGING_1G_ADDRESS_MASK_64;\r
51 mAddressEncMaskChecked = TRUE;\r
52\r
53 return mAddressEncMask;\r
54}\r
55\r
b721aa74
BS
/**
  Initialize a buffer pool for page table use only.

  To reduce the potential split operation on page table, the pages reserved for
  page table should be allocated in the times of PAGE_TABLE_POOL_UNIT_PAGES and
  at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
  initialized with number of pages greater than or equal to the given
  PoolPages.

  Once the pages in the pool are used up, this method should be called again to
  reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't
  happen often in practice.

  @param[in] PoolPages  The least page number of the pool to be created.

  @retval TRUE    The pool is initialized successfully.
  @retval FALSE   The memory is out of resource.
**/
STATIC
BOOLEAN
InitializePageTablePool (
  IN UINTN           PoolPages
  )
{
  VOID          *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // header.
  //
  PoolPages += 1;   // Add one page for header.
  PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
              PAGE_TABLE_POOL_UNIT_PAGES;
  Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier track later.
  //
  // The list is circular: a single-node pool points at itself; new pools are
  // inserted after the current head and then become the new head, so
  // mPageTablePool always refers to the most recently created (and only
  // partially consumed) pool.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool = Buffer;
    mPageTablePool = Buffer;
  }

  //
  // Reserve one page for pool header.
  //
  mPageTablePool->FreePages  = PoolPages - 1;
  mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}
115\r
/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages and returns a pointer to the allocated
  buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param  Pages                 The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
STATIC
VOID *
EFIAPI
AllocatePageTableMemory (
  IN UINTN           Pages
  )
{
  VOID *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  // Note: allocation is only ever attempted from the head pool; remaining
  // free pages in older pools on the circular list are not reused.
  //
  if (mPageTablePool == NULL ||
      Pages > mPageTablePool->FreePages) {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }

  //
  // Bump-allocate from the head pool: carve the pages off at the current
  // offset and account for them.
  //
  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages -= Pages;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Buffer=0x%Lx Pages=%ld\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Buffer,
    Pages
    ));

  return Buffer;
}
172\r
173\r
a1f22614
BS
/**
  Split 2M page to 4K.

  Allocates one page-table page, fills it with 512 4K entries covering the
  same physical range as the original 2M mapping, and repoints the 2M entry
  at the new table.

  @param[in]      PhysicalAddress       Start physical address the 2M page
                                        covered.
  @param[in, out] PageEntry2M           Pointer to 2M page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.

**/
STATIC
VOID
Split2MPageTo4K (
  IN        PHYSICAL_ADDRESS               PhysicalAddress,
  IN  OUT   UINT64                        *PageEntry2M,
  IN        PHYSICAL_ADDRESS               StackBase,
  IN        UINTN                          StackSize
  )
{
  PHYSICAL_ADDRESS                  PhysicalAddress4K;
  UINTN                             IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY               *PageTableEntry;
  PAGE_TABLE_4K_ENTRY               *PageTableEntry1;
  UINT64                            AddressEncMask;

  PageTableEntry = AllocatePageTableMemory(1);

  PageTableEntry1 = PageTableEntry;

  AddressEncMask = InternalGetMemEncryptionAddressMask ();

  ASSERT (PageTableEntry != NULL);
  //
  // The 2M entry being split is expected to be mapped encrypted (C-bit set).
  //
  ASSERT (*PageEntry2M & AddressEncMask);

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0;
       IndexOfPageTableEntries < 512;
       (IndexOfPageTableEntries++,
        PageTableEntry++,
        PhysicalAddress4K += SIZE_4KB)) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;
    PageTableEntry->Bits.Present = 1;
    if ((PhysicalAddress4K >= StackBase) &&
        (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
                  IA32_PG_P | IA32_PG_RW | AddressEncMask);
}
235\r
b721aa74
BS
/**
  Set one page of page table pool memory to be read-only.

  Walks the paging hierarchy down to the pool's address; when a leaf page is
  found whose granularity does not exceed the pool unit size, its R/W bit is
  cleared; otherwise the large page is split into a smaller granularity table
  and the walk continues.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                             PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS              Address,
  IN  BOOLEAN                           Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  //
  // Index 0 of these tables is unused; levels are numbered 1 (PTE) through
  // 4 (PML4) to match the loop below.
  //
  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask  = InternalGetMemEncryptionAddressMask();
  PageTable       = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize    = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      // Strip the encryption mask so the next-level table pointer is a plain
      // physical address.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT are fit in
          // one page (2MB). Then we don't need to update attributes for pages
          // crossing page directory. ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize -= LevelSize[Level];

          ++Index;
        }
      }

      break;

    } else {
      //
      // The smaller granularity of page must be needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      //
      // Re-map the large page as 512 entries of the next smaller size,
      // preserving presence, writability and the encryption mask.
      //
      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex) {
        NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }
        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                                        IA32_PG_P | IA32_PG_RW;
      PageTable = NewPageTable;
    }
  }
}
357\r
/**
  Prevent the memory pages used for page table from been overwritten.

  Iterates over every pool on the circular pool list and marks each
  PAGE_TABLE_POOL_UNIT_SIZE chunk of it read-only in the page table.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
EnablePageTableProtection (
  IN  UINTN     PageTableBase,
  IN  BOOLEAN   Level4Paging
  )
{
  PAGE_TABLE_POOL   *HeadPool;
  PAGE_TABLE_POOL   *Pool;
  UINT64            PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of page size of the processor (2MB by default). Let's apply
    // the protection to them one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);
      Address   += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize  -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);

}
406\r
407\r
a1f22614
BS
/**
  Split 1G page to 2M.

  Allocates a page-directory page, repoints the 1G entry at it, and fills it
  with 512 2M entries covering the same physical range. 2M entries that
  overlap the stack range are further split to 4K so the stack can be marked
  non-executable.

  @param[in]      PhysicalAddress       Start physical address the 1G page
                                        covered.
  @param[in, out] PageEntry1G           Pointer to 1G page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.

**/
STATIC
VOID
Split1GPageTo2M (
  IN          PHYSICAL_ADDRESS               PhysicalAddress,
  IN  OUT     UINT64                         *PageEntry1G,
  IN          PHYSICAL_ADDRESS               StackBase,
  IN          UINTN                          StackSize
  )
{
  PHYSICAL_ADDRESS                  PhysicalAddress2M;
  UINTN                             IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY                  *PageDirectoryEntry;
  UINT64                            AddressEncMask;

  PageDirectoryEntry = AllocatePageTableMemory(1);

  AddressEncMask = InternalGetMemEncryptionAddressMask ();
  ASSERT (PageDirectoryEntry != NULL);
  //
  // The 1G entry being split is expected to be mapped encrypted (C-bit set).
  //
  ASSERT (*PageEntry1G & AddressEncMask);
  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
                  IA32_PG_P | IA32_PG_RW | AddressEncMask);

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0;
       IndexOfPageDirectoryEntries < 512;
       (IndexOfPageDirectoryEntries++,
        PageDirectoryEntry++,
        PhysicalAddress2M += SIZE_2MB)) {
    if ((PhysicalAddress2M < StackBase + StackSize) &&
        ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {
      //
      // Need to split this 2M page that covers stack range.
      //
      Split2MPageTo4K (
        PhysicalAddress2M,
        (UINT64 *)PageDirectoryEntry,
        StackBase,
        StackSize
        );
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present = 1;
      PageDirectoryEntry->Bits.MustBe1 = 1;
    }
  }
}
471\r
472\r
473/**\r
474 Set or Clear the memory encryption bit\r
475\r
60b195d2 476 @param[in, out] PageTablePointer Page table entry pointer (PTE).\r
a1f22614
BS
477 @param[in] Mode Set or Clear encryption bit\r
478\r
479**/\r
480STATIC VOID\r
481SetOrClearCBit(\r
482 IN OUT UINT64* PageTablePointer,\r
483 IN MAP_RANGE_MODE Mode\r
484 )\r
485{\r
486 UINT64 AddressEncMask;\r
487\r
c330af02 488 AddressEncMask = InternalGetMemEncryptionAddressMask ();\r
a1f22614
BS
489\r
490 if (Mode == SetCBit) {\r
491 *PageTablePointer |= AddressEncMask;\r
492 } else {\r
493 *PageTablePointer &= ~AddressEncMask;\r
494 }\r
495\r
496}\r
497\r
b721aa74
BS
498/**\r
499 Check the WP status in CR0 register. This bit is used to lock or unlock write\r
500 access to pages marked as read-only.\r
501\r
502 @retval TRUE Write protection is enabled.\r
503 @retval FALSE Write protection is disabled.\r
504**/\r
505STATIC\r
506BOOLEAN\r
507IsReadOnlyPageWriteProtected (\r
508 VOID\r
509 )\r
510{\r
511 return ((AsmReadCr0 () & BIT16) != 0);\r
512}\r
513\r
514\r
515/**\r
516 Disable Write Protect on pages marked as read-only.\r
517**/\r
518STATIC\r
519VOID\r
520DisableReadOnlyPageWriteProtect (\r
521 VOID\r
522 )\r
523{\r
524 AsmWriteCr0 (AsmReadCr0() & ~BIT16);\r
525}\r
526\r
527/**\r
528 Enable Write Protect on pages marked as read-only.\r
529**/\r
c330af02 530STATIC\r
b721aa74
BS
531VOID\r
532EnableReadOnlyPageWriteProtect (\r
533 VOID\r
534 )\r
535{\r
536 AsmWriteCr0 (AsmReadCr0() | BIT16);\r
537}\r
538\r
539\r
a1f22614 540/**\r
4bd6bf31 541 This function either sets or clears memory encryption bit for the memory\r
cde8c568 542 region specified by PhysicalAddress and Length from the current page table\r
4bd6bf31 543 context.\r
a1f22614 544\r
cde8c568 545 The function iterates through the PhysicalAddress one page at a time, and set\r
a1f22614
BS
546 or clears the memory encryption mask in the page table. If it encounters\r
547 that a given physical address range is part of large page then it attempts to\r
548 change the attribute at one go (based on size), otherwise it splits the\r
549 large pages into smaller (e.g 2M page into 4K pages) and then try to set or\r
550 clear the encryption bit on the smallest page size.\r
551\r
cde8c568
LE
552 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use\r
553 current CR3)\r
a1f22614
BS
554 @param[in] PhysicalAddress The physical address that is the start\r
555 address of a memory region.\r
556 @param[in] Length The length of memory region\r
557 @param[in] Mode Set or Clear mode\r
cde8c568 558 @param[in] CacheFlush Flush the caches before applying the\r
a1f22614
BS
559 encryption mask\r
560\r
4bd6bf31
LE
561 @retval RETURN_SUCCESS The attributes were cleared for the\r
562 memory region.\r
a1f22614 563 @retval RETURN_INVALID_PARAMETER Number of pages is zero.\r
4bd6bf31
LE
564 @retval RETURN_UNSUPPORTED Setting the memory encyrption attribute\r
565 is not supported\r
a1f22614 566**/\r
a1f22614
BS
567STATIC\r
568RETURN_STATUS\r
569EFIAPI\r
570SetMemoryEncDec (\r
571 IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
572 IN PHYSICAL_ADDRESS PhysicalAddress,\r
573 IN UINTN Length,\r
574 IN MAP_RANGE_MODE Mode,\r
575 IN BOOLEAN CacheFlush\r
576 )\r
577{\r
578 PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;\r
579 PAGE_MAP_AND_DIRECTORY_POINTER *PageUpperDirectoryPointerEntry;\r
580 PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;\r
581 PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;\r
582 PAGE_TABLE_ENTRY *PageDirectory2MEntry;\r
583 PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
584 UINT64 PgTableMask;\r
585 UINT64 AddressEncMask;\r
b721aa74
BS
586 BOOLEAN IsWpEnabled;\r
587 RETURN_STATUS Status;\r
a1f22614 588\r
699a2c30
DB
589 //\r
590 // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.\r
591 //\r
592 PageMapLevel4Entry = NULL;\r
593\r
70063aec
LE
594 DEBUG ((\r
595 DEBUG_VERBOSE,\r
596 "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a CacheFlush=%u\n",\r
597 gEfiCallerBaseName,\r
598 __FUNCTION__,\r
599 Cr3BaseAddress,\r
600 PhysicalAddress,\r
601 (UINT64)Length,\r
602 (Mode == SetCBit) ? "Encrypt" : "Decrypt",\r
603 (UINT32)CacheFlush\r
604 ));\r
605\r
a1f22614
BS
606 //\r
607 // Check if we have a valid memory encryption mask\r
608 //\r
c330af02 609 AddressEncMask = InternalGetMemEncryptionAddressMask ();\r
a1f22614
BS
610 if (!AddressEncMask) {\r
611 return RETURN_ACCESS_DENIED;\r
612 }\r
613\r
614 PgTableMask = AddressEncMask | EFI_PAGE_MASK;\r
615\r
616 if (Length == 0) {\r
617 return RETURN_INVALID_PARAMETER;\r
618 }\r
619\r
620 //\r
621 // We are going to change the memory encryption attribute from C=0 -> C=1 or\r
4bd6bf31
LE
622 // vice versa Flush the caches to ensure that data is written into memory\r
623 // with correct C-bit\r
a1f22614
BS
624 //\r
625 if (CacheFlush) {\r
626 WriteBackInvalidateDataCacheRange((VOID*) (UINTN)PhysicalAddress, Length);\r
627 }\r
628\r
b721aa74
BS
629 //\r
630 // Make sure that the page table is changeable.\r
631 //\r
632 IsWpEnabled = IsReadOnlyPageWriteProtected ();\r
633 if (IsWpEnabled) {\r
634 DisableReadOnlyPageWriteProtect ();\r
635 }\r
636\r
637 Status = EFI_SUCCESS;\r
638\r
60b195d2 639 while (Length != 0)\r
a1f22614
BS
640 {\r
641 //\r
642 // If Cr3BaseAddress is not specified then read the current CR3\r
643 //\r
644 if (Cr3BaseAddress == 0) {\r
645 Cr3BaseAddress = AsmReadCr3();\r
646 }\r
647\r
648 PageMapLevel4Entry = (VOID*) (Cr3BaseAddress & ~PgTableMask);\r
649 PageMapLevel4Entry += PML4_OFFSET(PhysicalAddress);\r
650 if (!PageMapLevel4Entry->Bits.Present) {\r
6692af92 651 DEBUG ((\r
3728ea5a
LE
652 DEBUG_ERROR,\r
653 "%a:%a: bad PML4 for Physical=0x%Lx\n",\r
6692af92
LE
654 gEfiCallerBaseName,\r
655 __FUNCTION__,\r
656 PhysicalAddress\r
657 ));\r
b721aa74
BS
658 Status = RETURN_NO_MAPPING;\r
659 goto Done;\r
a1f22614
BS
660 }\r
661\r
4bd6bf31
LE
662 PageDirectory1GEntry = (VOID *)(\r
663 (PageMapLevel4Entry->Bits.PageTableBaseAddress <<\r
664 12) & ~PgTableMask\r
665 );\r
a1f22614
BS
666 PageDirectory1GEntry += PDP_OFFSET(PhysicalAddress);\r
667 if (!PageDirectory1GEntry->Bits.Present) {\r
6692af92 668 DEBUG ((\r
3728ea5a
LE
669 DEBUG_ERROR,\r
670 "%a:%a: bad PDPE for Physical=0x%Lx\n",\r
6692af92
LE
671 gEfiCallerBaseName,\r
672 __FUNCTION__,\r
673 PhysicalAddress\r
674 ));\r
b721aa74
BS
675 Status = RETURN_NO_MAPPING;\r
676 goto Done;\r
a1f22614
BS
677 }\r
678\r
679 //\r
680 // If the MustBe1 bit is not 1, it's not actually a 1GB entry\r
681 //\r
682 if (PageDirectory1GEntry->Bits.MustBe1) {\r
683 //\r
684 // Valid 1GB page\r
685 // If we have at least 1GB to go, we can just update this entry\r
686 //\r
60b195d2 687 if ((PhysicalAddress & (BIT30 - 1)) == 0 && Length >= BIT30) {\r
a1f22614 688 SetOrClearCBit(&PageDirectory1GEntry->Uint64, Mode);\r
6692af92
LE
689 DEBUG ((\r
690 DEBUG_VERBOSE,\r
5597edfa 691 "%a:%a: updated 1GB entry for Physical=0x%Lx\n",\r
6692af92
LE
692 gEfiCallerBaseName,\r
693 __FUNCTION__,\r
694 PhysicalAddress\r
695 ));\r
a1f22614
BS
696 PhysicalAddress += BIT30;\r
697 Length -= BIT30;\r
698 } else {\r
699 //\r
700 // We must split the page\r
701 //\r
6692af92
LE
702 DEBUG ((\r
703 DEBUG_VERBOSE,\r
d8d33741 704 "%a:%a: splitting 1GB page for Physical=0x%Lx\n",\r
6692af92 705 gEfiCallerBaseName,\r
631bd7e0
LE
706 __FUNCTION__,\r
707 PhysicalAddress\r
6692af92 708 ));\r
4bd6bf31
LE
709 Split1GPageTo2M (\r
710 (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,\r
711 (UINT64 *)PageDirectory1GEntry,\r
712 0,\r
713 0\r
714 );\r
a1f22614
BS
715 continue;\r
716 }\r
717 } else {\r
718 //\r
719 // Actually a PDP\r
720 //\r
4bd6bf31
LE
721 PageUpperDirectoryPointerEntry =\r
722 (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;\r
723 PageDirectory2MEntry =\r
724 (VOID *)(\r
725 (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<\r
726 12) & ~PgTableMask\r
727 );\r
a1f22614
BS
728 PageDirectory2MEntry += PDE_OFFSET(PhysicalAddress);\r
729 if (!PageDirectory2MEntry->Bits.Present) {\r
6692af92 730 DEBUG ((\r
3728ea5a
LE
731 DEBUG_ERROR,\r
732 "%a:%a: bad PDE for Physical=0x%Lx\n",\r
6692af92
LE
733 gEfiCallerBaseName,\r
734 __FUNCTION__,\r
735 PhysicalAddress\r
736 ));\r
b721aa74
BS
737 Status = RETURN_NO_MAPPING;\r
738 goto Done;\r
a1f22614
BS
739 }\r
740 //\r
741 // If the MustBe1 bit is not a 1, it's not a 2MB entry\r
742 //\r
743 if (PageDirectory2MEntry->Bits.MustBe1) {\r
744 //\r
745 // Valid 2MB page\r
746 // If we have at least 2MB left to go, we can just update this entry\r
747 //\r
60b195d2 748 if ((PhysicalAddress & (BIT21-1)) == 0 && Length >= BIT21) {\r
a1f22614
BS
749 SetOrClearCBit (&PageDirectory2MEntry->Uint64, Mode);\r
750 PhysicalAddress += BIT21;\r
751 Length -= BIT21;\r
752 } else {\r
753 //\r
754 // We must split up this page into 4K pages\r
755 //\r
6692af92
LE
756 DEBUG ((\r
757 DEBUG_VERBOSE,\r
d8d33741 758 "%a:%a: splitting 2MB page for Physical=0x%Lx\n",\r
6692af92
LE
759 gEfiCallerBaseName,\r
760 __FUNCTION__,\r
761 PhysicalAddress\r
762 ));\r
4bd6bf31
LE
763 Split2MPageTo4K (\r
764 (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,\r
765 (UINT64 *)PageDirectory2MEntry,\r
766 0,\r
767 0\r
768 );\r
a1f22614
BS
769 continue;\r
770 }\r
771 } else {\r
4bd6bf31
LE
772 PageDirectoryPointerEntry =\r
773 (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;\r
774 PageTableEntry =\r
775 (VOID *)(\r
776 (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<\r
777 12) & ~PgTableMask\r
778 );\r
a1f22614
BS
779 PageTableEntry += PTE_OFFSET(PhysicalAddress);\r
780 if (!PageTableEntry->Bits.Present) {\r
6692af92 781 DEBUG ((\r
3728ea5a
LE
782 DEBUG_ERROR,\r
783 "%a:%a: bad PTE for Physical=0x%Lx\n",\r
6692af92
LE
784 gEfiCallerBaseName,\r
785 __FUNCTION__,\r
786 PhysicalAddress\r
787 ));\r
b721aa74
BS
788 Status = RETURN_NO_MAPPING;\r
789 goto Done;\r
a1f22614
BS
790 }\r
791 SetOrClearCBit (&PageTableEntry->Uint64, Mode);\r
792 PhysicalAddress += EFI_PAGE_SIZE;\r
793 Length -= EFI_PAGE_SIZE;\r
794 }\r
795 }\r
796 }\r
797\r
b721aa74
BS
798 //\r
799 // Protect the page table by marking the memory used for page table to be\r
800 // read-only.\r
801 //\r
802 if (IsWpEnabled) {\r
803 EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);\r
804 }\r
805\r
a1f22614
BS
806 //\r
807 // Flush TLB\r
808 //\r
809 CpuFlushTlb();\r
810\r
b721aa74
BS
811Done:\r
812 //\r
813 // Restore page table write protection, if any.\r
814 //\r
815 if (IsWpEnabled) {\r
816 EnableReadOnlyPageWriteProtect ();\r
817 }\r
818\r
819 return Status;\r
a1f22614
BS
820}\r
821\r
822/**\r
823 This function clears memory encryption bit for the memory region specified by\r
1532e5d5 824 PhysicalAddress and Length from the current page table context.\r
a1f22614 825\r
1532e5d5
LE
826 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use\r
827 current CR3)\r
a1f22614
BS
828 @param[in] PhysicalAddress The physical address that is the start\r
829 address of a memory region.\r
830 @param[in] Length The length of memory region\r
831 @param[in] Flush Flush the caches before applying the\r
832 encryption mask\r
833\r
4bd6bf31
LE
834 @retval RETURN_SUCCESS The attributes were cleared for the\r
835 memory region.\r
a1f22614 836 @retval RETURN_INVALID_PARAMETER Number of pages is zero.\r
1532e5d5 837 @retval RETURN_UNSUPPORTED Clearing the memory encyrption attribute\r
4bd6bf31 838 is not supported\r
a1f22614
BS
839**/\r
840RETURN_STATUS\r
841EFIAPI\r
842InternalMemEncryptSevSetMemoryDecrypted (\r
843 IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
844 IN PHYSICAL_ADDRESS PhysicalAddress,\r
845 IN UINTN Length,\r
846 IN BOOLEAN Flush\r
847 )\r
848{\r
849\r
4bd6bf31
LE
850 return SetMemoryEncDec (\r
851 Cr3BaseAddress,\r
852 PhysicalAddress,\r
853 Length,\r
854 ClearCBit,\r
855 Flush\r
856 );\r
a1f22614
BS
857}\r
858\r
859/**\r
860 This function sets memory encryption bit for the memory region specified by\r
68e60a38 861 PhysicalAddress and Length from the current page table context.\r
a1f22614 862\r
68e60a38
LE
863 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use\r
864 current CR3)\r
4bd6bf31
LE
865 @param[in] PhysicalAddress The physical address that is the start\r
866 address of a memory region.\r
a1f22614
BS
867 @param[in] Length The length of memory region\r
868 @param[in] Flush Flush the caches before applying the\r
869 encryption mask\r
870\r
68e60a38
LE
871 @retval RETURN_SUCCESS The attributes were set for the memory\r
872 region.\r
a1f22614 873 @retval RETURN_INVALID_PARAMETER Number of pages is zero.\r
4bd6bf31
LE
874 @retval RETURN_UNSUPPORTED Setting the memory encyrption attribute\r
875 is not supported\r
a1f22614
BS
876**/\r
877RETURN_STATUS\r
878EFIAPI\r
879InternalMemEncryptSevSetMemoryEncrypted (\r
880 IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
881 IN PHYSICAL_ADDRESS PhysicalAddress,\r
882 IN UINTN Length,\r
883 IN BOOLEAN Flush\r
884 )\r
885{\r
4bd6bf31
LE
886 return SetMemoryEncDec (\r
887 Cr3BaseAddress,\r
888 PhysicalAddress,\r
889 Length,\r
890 SetCBit,\r
891 Flush\r
892 );\r
a1f22614 893}\r
901a9bfc
BS
894\r
895/**\r
896 This function clears memory encryption bit for the MMIO region specified by\r
897 PhysicalAddress and Length.\r
898\r
899 @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use\r
900 current CR3)\r
901 @param[in] PhysicalAddress The physical address that is the start\r
902 address of a MMIO region.\r
903 @param[in] Length The length of memory region\r
904\r
905 @retval RETURN_SUCCESS The attributes were cleared for the\r
906 memory region.\r
907 @retval RETURN_INVALID_PARAMETER Length is zero.\r
908 @retval RETURN_UNSUPPORTED Clearing the memory encyrption attribute\r
909 is not supported\r
910**/\r
911RETURN_STATUS\r
912EFIAPI\r
913InternalMemEncryptSevClearMmioPageEncMask (\r
914 IN PHYSICAL_ADDRESS Cr3BaseAddress,\r
915 IN PHYSICAL_ADDRESS PhysicalAddress,\r
916 IN UINTN Length\r
917 )\r
918{\r
919 return SetMemoryEncDec (\r
920 Cr3BaseAddress,\r
921 PhysicalAddress,\r
922 Length,\r
923 ClearCBit,\r
924 FALSE\r
925 );\r
926}\r