/** @file

  Virtual Memory Management Services to set or clear the memory encryption bit

  Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
  Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

  Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c

**/

#include <Library/CpuLib.h>
#include <Library/MemEncryptSevLib.h>
#include <Register/Amd/Cpuid.h>
#include <Register/Cpuid.h>

#include "VirtualMemory.h"
#include "SnpPageStateChange.h"

STATIC BOOLEAN          mAddressEncMaskChecked = FALSE;
STATIC UINT64           mAddressEncMask;
STATIC PAGE_TABLE_POOL  *mPageTablePool = NULL;

typedef enum {
  SetCBit,
  ClearCBit
} MAP_RANGE_MODE;

/**
  Return the page table memory encryption mask.

  @return  The page table memory encryption mask.

**/
UINT64
EFIAPI
InternalGetMemEncryptionAddressMask (
  VOID
  )
{
  UINT64  EncryptionMask;

  if (mAddressEncMaskChecked) {
    return mAddressEncMask;
  }

  EncryptionMask = MemEncryptSevGetEncryptionMask ();

  mAddressEncMask        = EncryptionMask & PAGING_1G_ADDRESS_MASK_64;
  mAddressEncMaskChecked = TRUE;

  return mAddressEncMask;
}

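//
// Illustrative example, assuming a C-bit reported at position 47 by CPUID
// Fn8000_001F[EBX]: MemEncryptSevGetEncryptionMask () would return BIT47, and
// mAddressEncMask would cache BIT47 & PAGING_1G_ADDRESS_MASK_64.
//
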
/**
  Initialize a buffer pool for page table use only.

  To reduce the potential split operation on page table, the pages reserved for
  page table should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES and
  at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
  initialized with a number of pages greater than or equal to the given
  PoolPages.

  Once the pages in the pool are used up, this method should be called again to
  reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. This usually won't
  happen in practice.

  @param[in] PoolPages  The minimum number of pages in the pool to be created.

  @retval TRUE    The pool is initialized successfully.
  @retval FALSE   Out of memory resources.
**/
STATIC
BOOLEAN
InitializePageTablePool (
  IN UINTN  PoolPages
  )
{
  VOID  *Buffer;

  //
  // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
  // header.
  //
  PoolPages += 1;   // Add one page for header.
  PoolPages  = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
               PAGE_TABLE_POOL_UNIT_PAGES;
  Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
  if (Buffer == NULL) {
    DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
    return FALSE;
  }

  //
  // Link all pools into a list for easier tracking later.
  //
  if (mPageTablePool == NULL) {
    mPageTablePool           = Buffer;
    mPageTablePool->NextPool = mPageTablePool;
  } else {
    ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
    mPageTablePool->NextPool              = Buffer;
    mPageTablePool                        = Buffer;
  }

  //
  // Reserve one page for pool header.
  //
  mPageTablePool->FreePages = PoolPages - 1;
  mPageTablePool->Offset    = EFI_PAGES_TO_SIZE (1);

  return TRUE;
}

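//
// Sizing sketch, assuming the usual 2MB pool unit (PAGE_TABLE_POOL_UNIT_PAGES
// of 512 4KB pages): a request for 1 page grows to 2 once the header page is
// added, InitializePageTablePool () rounds that up to 512 pages, and the new
// pool starts out with Offset = EFI_PAGES_TO_SIZE (1) and FreePages = 511.
//
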
/**
  This API provides a way to allocate memory for page table.

  This API can be called more than once to allocate memory for page tables.

  Allocates the number of 4KB pages and returns a pointer to the allocated
  buffer. The buffer returned is aligned on a 4KB boundary.

  If Pages is 0, then NULL is returned.
  If there is not enough memory remaining to satisfy the request, then NULL is
  returned.

  @param  Pages                 The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
STATIC
VOID *
EFIAPI
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  if (Pages == 0) {
    return NULL;
  }

  //
  // Renew the pool if necessary.
  //
  if ((mPageTablePool == NULL) ||
      (Pages > mPageTablePool->FreePages))
  {
    if (!InitializePageTablePool (Pages)) {
      return NULL;
    }
  }

  Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;

  mPageTablePool->Offset    += EFI_PAGES_TO_SIZE (Pages);
  mPageTablePool->FreePages -= Pages;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Buffer=0x%Lx Pages=%ld\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Buffer,
    Pages
    ));

  return Buffer;
}

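//
// Note on the allocator above: it is a simple bump allocator over the current
// pool; Offset only advances and there is no matching free routine. Pages
// handed out remain page table memory until EnablePageTableProtection ()
// (below) marks the whole pool read-only.
//
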
/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress       Start physical address covered by the
                                        2M page.
  @param[in, out] PageEntry2M           Pointer to 2M page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.

**/
STATIC
VOID
Split2MPageTo4K (
  IN      PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT  UINT64            *PageEntry2M,
  IN      PHYSICAL_ADDRESS  StackBase,
  IN      UINTN             StackSize
  )
{
  PHYSICAL_ADDRESS     PhysicalAddress4K;
  UINTN                IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY  *PageTableEntry;
  PAGE_TABLE_4K_ENTRY  *PageTableEntry1;
  UINT64               AddressEncMask;

  PageTableEntry = AllocatePageTableMemory (1);

  PageTableEntry1 = PageTableEntry;

  AddressEncMask = InternalGetMemEncryptionAddressMask ();

  ASSERT (PageTableEntry != NULL);
  ASSERT (*PageEntry2M & AddressEncMask);

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0;
       IndexOfPageTableEntries < 512;
       (IndexOfPageTableEntries++,
        PageTableEntry++,
        PhysicalAddress4K += SIZE_4KB))
  {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64         = (UINT64)PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;
    PageTableEntry->Bits.Present   = 1;
    if ((PhysicalAddress4K >= StackBase) &&
        (PhysicalAddress4K < StackBase + StackSize))
    {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
                  IA32_PG_P | IA32_PG_RW | AddressEncMask);
}

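//
// Worked example for the split above: the new page table holds 512 entries of
// SIZE_4KB each, i.e. exactly the SIZE_2MB that the original large page
// mapped, so the guest-physical layout is unchanged; only the 4KB pages that
// overlap [StackBase, StackBase + StackSize) additionally pick up the Nx bit.
//
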
/**
  Set one page of page table pool memory to be read-only.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Address          Start address of a page to be set as read-only.
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
SetPageTablePoolReadOnly (
  IN  UINTN                 PageTableBase,
  IN  EFI_PHYSICAL_ADDRESS  Address,
  IN  BOOLEAN               Level4Paging
  )
{
  UINTN                 Index;
  UINTN                 EntryIndex;
  UINT64                AddressEncMask;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
  UINT64                *PageTable;
  UINT64                *NewPageTable;
  UINT64                PageAttr;
  UINT64                LevelSize[5];
  UINT64                LevelMask[5];
  UINTN                 LevelShift[5];
  UINTN                 Level;
  UINT64                PoolUnitSize;

  ASSERT (PageTableBase != 0);

  //
  // Since the page table is always from the page table pool, which is always
  // located at the boundary of PcdPageTablePoolAlignment, we just need to
  // set the whole pool unit to be read-only.
  //
  Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;

  LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
  LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
  LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
  LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;

  LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
  LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
  LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
  LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;

  LevelSize[1] = SIZE_4KB;
  LevelSize[2] = SIZE_2MB;
  LevelSize[3] = SIZE_1GB;
  LevelSize[4] = SIZE_512GB;

  AddressEncMask = InternalGetMemEncryptionAddressMask ();
  PageTable      = (UINT64 *)(UINTN)PageTableBase;
  PoolUnitSize   = PAGE_TABLE_POOL_UNIT_SIZE;

  for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
    Index  = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
    Index &= PAGING_PAE_INDEX_MASK;

    PageAttr = PageTable[Index];
    if ((PageAttr & IA32_PG_PS) == 0) {
      //
      // Go to next level of table.
      //
      PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
                                    PAGING_4K_ADDRESS_MASK_64);
      continue;
    }

    if (PoolUnitSize >= LevelSize[Level]) {
      //
      // Clear R/W bit if current page granularity is not larger than pool unit
      // size.
      //
      if ((PageAttr & IA32_PG_RW) != 0) {
        while (PoolUnitSize > 0) {
          //
          // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit within
          // one page (2MB), so we don't need to update attributes for pages
          // crossing a page directory. The ASSERT below is for that purpose.
          //
          ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));

          PageTable[Index] &= ~(UINT64)IA32_PG_RW;
          PoolUnitSize     -= LevelSize[Level];

          ++Index;
        }
      }

      break;
    } else {
      //
      // A smaller page granularity is needed.
      //
      ASSERT (Level > 1);

      NewPageTable = AllocatePageTableMemory (1);
      ASSERT (NewPageTable != NULL);

      PhysicalAddress = PageAttr & LevelMask[Level];
      for (EntryIndex = 0;
           EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
           ++EntryIndex)
      {
        NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
                                   IA32_PG_P | IA32_PG_RW;
        if (Level > 2) {
          NewPageTable[EntryIndex] |= IA32_PG_PS;
        }

        PhysicalAddress += LevelSize[Level - 1];
      }

      PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
                         IA32_PG_P | IA32_PG_RW;
      PageTable        = NewPageTable;
    }
  }
}

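//
// Sketch of the net effect above, assuming the default 2MB pool unit: the
// masked Address is the 2MB-aligned start of the pool unit, so the walk
// typically either clears R/W on the single 2MB PDE that maps the unit, or
// first splits a covering 1GB page into 512 2MB entries and then clears R/W
// on the one entry that maps the unit.
//
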
/**
  Prevent the memory pages used for the page table from being overwritten.

  @param[in] PageTableBase    Base address of page table (CR3).
  @param[in] Level4Paging     Level 4 paging flag.

**/
STATIC
VOID
EnablePageTableProtection (
  IN  UINTN    PageTableBase,
  IN  BOOLEAN  Level4Paging
  )
{
  PAGE_TABLE_POOL       *HeadPool;
  PAGE_TABLE_POOL       *Pool;
  UINT64                PoolSize;
  EFI_PHYSICAL_ADDRESS  Address;

  if (mPageTablePool == NULL) {
    return;
  }

  //
  // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
  // remember the original one in advance.
  //
  HeadPool = mPageTablePool;
  Pool     = HeadPool;
  do {
    Address  = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
    PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);

    //
    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
    // which is one of the processor's page sizes (2MB by default). Let's apply
    // the protection to the units one by one.
    //
    while (PoolSize > 0) {
      SetPageTablePoolReadOnly (PageTableBase, Address, Level4Paging);
      Address  += PAGE_TABLE_POOL_UNIT_SIZE;
      PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
    }

    Pool = Pool->NextPool;
  } while (Pool != HeadPool);
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress       Start physical address covered by the
                                        1G page.
  @param[in, out] PageEntry1G           Pointer to 1G page entry.
  @param[in]      StackBase             Stack base address.
  @param[in]      StackSize             Stack size.

**/
STATIC
VOID
Split1GPageTo2M (
  IN      PHYSICAL_ADDRESS  PhysicalAddress,
  IN OUT  UINT64            *PageEntry1G,
  IN      PHYSICAL_ADDRESS  StackBase,
  IN      UINTN             StackSize
  )
{
  PHYSICAL_ADDRESS  PhysicalAddress2M;
  UINTN             IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY  *PageDirectoryEntry;
  UINT64            AddressEncMask;

  PageDirectoryEntry = AllocatePageTableMemory (1);

  AddressEncMask = InternalGetMemEncryptionAddressMask ();
  ASSERT (PageDirectoryEntry != NULL);
  ASSERT (*PageEntry1G & AddressEncMask);
  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
                  IA32_PG_P | IA32_PG_RW | AddressEncMask);

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0;
       IndexOfPageDirectoryEntries < 512;
       (IndexOfPageDirectoryEntries++,
        PageDirectoryEntry++,
        PhysicalAddress2M += SIZE_2MB))
  {
    if ((PhysicalAddress2M < StackBase + StackSize) &&
        ((PhysicalAddress2M + SIZE_2MB) > StackBase))
    {
      //
      // Need to split this 2M page that covers the stack range.
      //
      Split2MPageTo4K (
        PhysicalAddress2M,
        (UINT64 *)PageDirectoryEntry,
        StackBase,
        StackSize
        );
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64         = (UINT64)PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present   = 1;
      PageDirectoryEntry->Bits.MustBe1   = 1;
    }
  }
}

/**
  Set or Clear the memory encryption bit

  @param[in, out] PageTablePointer     Page table entry pointer (PTE).
  @param[in]      Mode                 Set or Clear encryption bit

**/
STATIC VOID
SetOrClearCBit (
  IN OUT  UINT64          *PageTablePointer,
  IN      MAP_RANGE_MODE  Mode
  )
{
  UINT64  AddressEncMask;

  AddressEncMask = InternalGetMemEncryptionAddressMask ();

  if (Mode == SetCBit) {
    *PageTablePointer |= AddressEncMask;
  } else {
    *PageTablePointer &= ~AddressEncMask;
  }
}

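//
// Numeric example (hypothetical C-bit at bit 47): for a 4KB PTE whose value is
// 0x0000800012345067 (address 0x12345000, attribute bits 0x67, C-bit set),
// ClearCBit would leave 0x0000000012345067, and SetCBit would restore it.
//
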
/**
  Check the WP status in CR0 register. This bit is used to lock or unlock write
  access to pages marked as read-only.

  @retval TRUE    Write protection is enabled.
  @retval FALSE   Write protection is disabled.
**/
STATIC
BOOLEAN
IsReadOnlyPageWriteProtected (
  VOID
  )
{
  return ((AsmReadCr0 () & BIT16) != 0);
}

/**
  Disable Write Protect on pages marked as read-only.
**/
STATIC
VOID
DisableReadOnlyPageWriteProtect (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0 () & ~BIT16);
}

/**
  Enable Write Protect on pages marked as read-only.
**/
STATIC
VOID
EnableReadOnlyPageWriteProtect (
  VOID
  )
{
  AsmWriteCr0 (AsmReadCr0 () | BIT16);
}

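//
// BIT16 above is the CR0.WP (Write Protect) flag: while it is clear,
// supervisor-mode code may write to pages whose page table entries mark them
// read-only, which is what lets this library edit page table pages that
// EnablePageTableProtection () previously made read-only.
//
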
/**
  Create 1GB identity mappings, carrying the memory encryption mask, for the
  memory region specified by PhysicalAddress and Length in the page table
  referenced by Cr3BaseAddress (or the current CR3 if zero).

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region

  @retval RETURN_SUCCESS              The identity mapping was created.
  @retval RETURN_INVALID_PARAMETER    Length is zero.
  @retval RETURN_ACCESS_DENIED        The encryption mask is not available.
  @retval RETURN_NO_MAPPING           The PML4 entry for the region is not
                                      present.
**/
RETURN_STATUS
EFIAPI
InternalMemEncryptSevCreateIdentityMap1G (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN UINTN             Length
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  UINT64                          PgTableMask;
  UINT64                          AddressEncMask;
  BOOLEAN                         IsWpEnabled;
  RETURN_STATUS                   Status;

  //
  // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel4Entry = NULL;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length
    ));

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // Check if we have a valid memory encryption mask
  //
  AddressEncMask = InternalGetMemEncryptionAddressMask ();
  if (!AddressEncMask) {
    return RETURN_ACCESS_DENIED;
  }

  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  //
  // Make sure that the page table is changeable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  Status = EFI_SUCCESS;

  while (Length) {
    //
    // If Cr3BaseAddress is not specified then read the current CR3
    //
    if (Cr3BaseAddress == 0) {
      Cr3BaseAddress = AsmReadCr3 ();
    }

    PageMapLevel4Entry  = (VOID *)(Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID *)(
                                    (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
                                     12) & ~PgTableMask
                                    );
    PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      PageDirectory1GEntry->Bits.Present    = 1;
      PageDirectory1GEntry->Bits.MustBe1    = 1;
      PageDirectory1GEntry->Bits.MustBeZero = 0;
      PageDirectory1GEntry->Bits.ReadWrite  = 1;
      PageDirectory1GEntry->Uint64         |= (UINT64)PhysicalAddress | AddressEncMask;
    }

    if (Length <= BIT30) {
      Length = 0;
    } else {
      Length -= BIT30;
    }

    PhysicalAddress += BIT30;
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}

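//
// Usage sketch for the function above (hypothetical values): create encrypted
// 1GB identity mappings for a 4GB region starting at 16GB in the firmware's
// current page table:
//
//   Status = InternalMemEncryptSevCreateIdentityMap1G (0, 0x400000000ULL, SIZE_4GB);
//
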
/**
  This function either sets or clears memory encryption bit for the memory
  region specified by PhysicalAddress and Length from the current page table
  context.

  The function iterates through the PhysicalAddress one page at a time, and
  sets or clears the memory encryption mask in the page table. If it encounters
  a physical address range that is part of a large page, it attempts to change
  the attribute in one go (based on size); otherwise it splits the large page
  into smaller ones (e.g. a 2M page into 4K pages) and then sets or clears the
  encryption bit on the smallest page size.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region
  @param[in]  Mode                    Set or Clear mode
  @param[in]  CacheFlush              Flush the caches before applying the
                                      encryption mask

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encryption attribute
                                      is not supported
**/
STATIC
RETURN_STATUS
EFIAPI
SetMemoryEncDec (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN UINTN             Length,
  IN MAP_RANGE_MODE    Mode,
  IN BOOLEAN           CacheFlush
  )
{
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageUpperDirectoryPointerEntry;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_1G_ENTRY             *PageDirectory1GEntry;
  PAGE_TABLE_ENTRY                *PageDirectory2MEntry;
  PHYSICAL_ADDRESS                OrigPhysicalAddress;
  PAGE_TABLE_4K_ENTRY             *PageTableEntry;
  UINT64                          PgTableMask;
  UINT64                          AddressEncMask;
  BOOLEAN                         IsWpEnabled;
  UINTN                           OrigLength;
  RETURN_STATUS                   Status;

  //
  // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
  //
  PageMapLevel4Entry = NULL;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a CacheFlush=%u\n",
    gEfiCallerBaseName,
    __FUNCTION__,
    Cr3BaseAddress,
    PhysicalAddress,
    (UINT64)Length,
    (Mode == SetCBit) ? "Encrypt" : "Decrypt",
    (UINT32)CacheFlush
    ));

  //
  // Check if we have a valid memory encryption mask
  //
  AddressEncMask = InternalGetMemEncryptionAddressMask ();
  if (!AddressEncMask) {
    return RETURN_ACCESS_DENIED;
  }

  PgTableMask = AddressEncMask | EFI_PAGE_MASK;

  if (Length == 0) {
    return RETURN_INVALID_PARAMETER;
  }

  //
  // We are going to change the memory encryption attribute from C=0 -> C=1 or
  // vice versa. Flush the caches to ensure that data is written into memory
  // with the correct C-bit.
  //
  if (CacheFlush) {
    WriteBackInvalidateDataCacheRange ((VOID *)(UINTN)PhysicalAddress, Length);
  }

  //
  // Make sure that the page table is changeable.
  //
  IsWpEnabled = IsReadOnlyPageWriteProtected ();
  if (IsWpEnabled) {
    DisableReadOnlyPageWriteProtect ();
  }

  Status = EFI_SUCCESS;

  //
  // To maintain the security guarantees we must set the page to shared in the
  // RMP table before clearing the memory encryption mask from the current page
  // table.
  //
  // InternalSetPageState() is used to set the page state in the RMP table.
  //
  if ((Mode == ClearCBit) && MemEncryptSevSnpIsEnabled ()) {
    InternalSetPageState (PhysicalAddress, EFI_SIZE_TO_PAGES (Length), SevSnpPageShared, FALSE);
  }

  //
  // Save the specified length and physical address (we need them later).
  //
  OrigLength          = Length;
  OrigPhysicalAddress = PhysicalAddress;

  while (Length != 0) {
    //
    // If Cr3BaseAddress is not specified then read the current CR3
    //
    if (Cr3BaseAddress == 0) {
      Cr3BaseAddress = AsmReadCr3 ();
    }

    PageMapLevel4Entry  = (VOID *)(Cr3BaseAddress & ~PgTableMask);
    PageMapLevel4Entry += PML4_OFFSET (PhysicalAddress);
    if (!PageMapLevel4Entry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PML4 for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    PageDirectory1GEntry = (VOID *)(
                                    (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
                                     12) & ~PgTableMask
                                    );
    PageDirectory1GEntry += PDP_OFFSET (PhysicalAddress);
    if (!PageDirectory1GEntry->Bits.Present) {
      DEBUG ((
        DEBUG_ERROR,
        "%a:%a: bad PDPE for Physical=0x%Lx\n",
        gEfiCallerBaseName,
        __FUNCTION__,
        PhysicalAddress
        ));
      Status = RETURN_NO_MAPPING;
      goto Done;
    }

    //
    // If the MustBe1 bit is not 1, it's not actually a 1GB entry
    //
    if (PageDirectory1GEntry->Bits.MustBe1) {
      //
      // Valid 1GB page
      // If we have at least 1GB to go, we can just update this entry
      //
      if (((PhysicalAddress & (BIT30 - 1)) == 0) && (Length >= BIT30)) {
        SetOrClearCBit (&PageDirectory1GEntry->Uint64, Mode);
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        PhysicalAddress += BIT30;
        Length          -= BIT30;
      } else {
        //
        // We must split the page
        //
        DEBUG ((
          DEBUG_VERBOSE,
          "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Split1GPageTo2M (
          (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
          (UINT64 *)PageDirectory1GEntry,
          0,
          0
          );
        continue;
      }
    } else {
      //
      // Actually a PDP
      //
      PageUpperDirectoryPointerEntry =
        (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
      PageDirectory2MEntry =
        (VOID *)(
                 (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                  12) & ~PgTableMask
                 );
      PageDirectory2MEntry += PDE_OFFSET (PhysicalAddress);
      if (!PageDirectory2MEntry->Bits.Present) {
        DEBUG ((
          DEBUG_ERROR,
          "%a:%a: bad PDE for Physical=0x%Lx\n",
          gEfiCallerBaseName,
          __FUNCTION__,
          PhysicalAddress
          ));
        Status = RETURN_NO_MAPPING;
        goto Done;
      }

      //
      // If the MustBe1 bit is not a 1, it's not a 2MB entry
      //
      if (PageDirectory2MEntry->Bits.MustBe1) {
        //
        // Valid 2MB page
        // If we have at least 2MB left to go, we can just update this entry
        //
        if (((PhysicalAddress & (BIT21-1)) == 0) && (Length >= BIT21)) {
          SetOrClearCBit (&PageDirectory2MEntry->Uint64, Mode);
          PhysicalAddress += BIT21;
          Length          -= BIT21;
        } else {
          //
          // We must split up this page into 4K pages
          //
          DEBUG ((
            DEBUG_VERBOSE,
            "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Split2MPageTo4K (
            (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
            (UINT64 *)PageDirectory2MEntry,
            0,
            0
            );
          continue;
        }
      } else {
        PageDirectoryPointerEntry =
          (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
        PageTableEntry =
          (VOID *)(
                   (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
                    12) & ~PgTableMask
                   );
        PageTableEntry += PTE_OFFSET (PhysicalAddress);
        if (!PageTableEntry->Bits.Present) {
          DEBUG ((
            DEBUG_ERROR,
            "%a:%a: bad PTE for Physical=0x%Lx\n",
            gEfiCallerBaseName,
            __FUNCTION__,
            PhysicalAddress
            ));
          Status = RETURN_NO_MAPPING;
          goto Done;
        }

        SetOrClearCBit (&PageTableEntry->Uint64, Mode);
        PhysicalAddress += EFI_PAGE_SIZE;
        Length          -= EFI_PAGE_SIZE;
      }
    }
  }

  //
  // Protect the page table by marking the memory used for the page table as
  // read-only.
  //
  if (IsWpEnabled) {
    EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

  //
  // SEV-SNP requires that all the private pages (i.e., pages mapped encrypted)
  // are added to the RMP table before they are accessed.
  //
  // InternalSetPageState() is used to set the page state in the RMP table.
  //
  if ((Mode == SetCBit) && MemEncryptSevSnpIsEnabled ()) {
    InternalSetPageState (
      OrigPhysicalAddress,
      EFI_SIZE_TO_PAGES (OrigLength),
      SevSnpPagePrivate,
      FALSE
      );
  }

Done:
  //
  // Restore page table write protection, if any.
  //
  if (IsWpEnabled) {
    EnableReadOnlyPageWriteProtect ();
  }

  return Status;
}

/**
  This function clears memory encryption bit for the memory region specified by
  PhysicalAddress and Length from the current page table context.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Clearing the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
InternalMemEncryptSevSetMemoryDecrypted (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN UINTN             Length
  )
{
  return SetMemoryEncDec (
           Cr3BaseAddress,
           PhysicalAddress,
           Length,
           ClearCBit,
           TRUE
           );
}

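//
// Usage sketch for the wrapper above (hypothetical buffer): a caller sharing a
// one-page buffer with the hypervisor would typically pass 0 for
// Cr3BaseAddress so the current CR3 is used:
//
//   Status = InternalMemEncryptSevSetMemoryDecrypted (
//              0,
//              (PHYSICAL_ADDRESS)(UINTN)Buffer,
//              EFI_PAGES_TO_SIZE (1)
//              );
//
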
/**
  This function sets memory encryption bit for the memory region specified by
  PhysicalAddress and Length from the current page table context.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of a memory region.
  @param[in]  Length                  The length of memory region

  @retval RETURN_SUCCESS              The attributes were set for the memory
                                      region.
  @retval RETURN_INVALID_PARAMETER    Number of pages is zero.
  @retval RETURN_UNSUPPORTED          Setting the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
InternalMemEncryptSevSetMemoryEncrypted (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN UINTN             Length
  )
{
  return SetMemoryEncDec (
           Cr3BaseAddress,
           PhysicalAddress,
           Length,
           SetCBit,
           TRUE
           );
}

/**
  This function clears memory encryption bit for the MMIO region specified by
  PhysicalAddress and Length.

  @param[in]  Cr3BaseAddress          Cr3 Base Address (if zero then use
                                      current CR3)
  @param[in]  PhysicalAddress         The physical address that is the start
                                      address of an MMIO region.
  @param[in]  Length                  The length of memory region

  @retval RETURN_SUCCESS              The attributes were cleared for the
                                      memory region.
  @retval RETURN_INVALID_PARAMETER    Length is zero.
  @retval RETURN_UNSUPPORTED          Clearing the memory encryption attribute
                                      is not supported
**/
RETURN_STATUS
EFIAPI
InternalMemEncryptSevClearMmioPageEncMask (
  IN PHYSICAL_ADDRESS  Cr3BaseAddress,
  IN PHYSICAL_ADDRESS  PhysicalAddress,
  IN UINTN             Length
  )
{
  return SetMemoryEncDec (
           Cr3BaseAddress,
           PhysicalAddress,
           Length,
           ClearCBit,
           FALSE
           );
}