]> git.proxmox.com Git - mirror_edk2.git/blame - MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
MdeModulePkg/Core/DxeIplPeim: Add support for PCD PcdPteMemoryEncryptionAddressOrMask
[mirror_edk2.git] / MdeModulePkg / Core / DxeIplPeim / X64 / VirtualMemory.c
CommitLineData
f3b33289 1/** @file\r
2 x64 Virtual Memory Management Services in the form of an IA-32 driver. \r
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to\r
4 enter Long Mode (x64 64-bit mode).\r
5\r
  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required then 4K page tables are used.
12\r
13 References:\r
4140a663 14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel\r
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel\r
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel\r
f3b33289 17\r
36829e67 18Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>\r
5997daf7
LD
19Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
20\r
cd5ebaa0 21This program and the accompanying materials\r
f3b33289 22are licensed and made available under the terms and conditions of the BSD License\r
23which accompanies this distribution. The full text of the license may be found at\r
24http://opensource.org/licenses/bsd-license.php\r
25\r
26THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
27WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
28\r
29**/ \r
30\r
31#include "DxeIpl.h"\r
32#include "VirtualMemory.h"\r
33\r
5997daf7 34\r
5630cdfe
SZ
35/**\r
36 Enable Execute Disable Bit.\r
37\r
38**/\r
39VOID\r
40EnableExecuteDisableBit (\r
41 VOID\r
42 )\r
43{\r
44 UINT64 MsrRegisters;\r
45\r
46 MsrRegisters = AsmReadMsr64 (0xC0000080);\r
47 MsrRegisters |= BIT11;\r
48 AsmWriteMsr64 (0xC0000080, MsrRegisters);\r
49}\r
50\r
51/**\r
52 Split 2M page to 4K.\r
53\r
54 @param[in] PhysicalAddress Start physical address the 2M page covered.\r
55 @param[in, out] PageEntry2M Pointer to 2M page entry.\r
56 @param[in] StackBase Stack base address.\r
57 @param[in] StackSize Stack size.\r
58\r
59**/\r
60VOID\r
61Split2MPageTo4K (\r
62 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
63 IN OUT UINT64 *PageEntry2M,\r
64 IN EFI_PHYSICAL_ADDRESS StackBase,\r
65 IN UINTN StackSize\r
66 )\r
67{\r
68 EFI_PHYSICAL_ADDRESS PhysicalAddress4K;\r
69 UINTN IndexOfPageTableEntries;\r
70 PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
5997daf7
LD
71 UINT64 AddressEncMask;\r
72\r
73 //\r
74 // Make sure AddressEncMask is contained to smallest supported address field\r
75 //\r
76 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
5630cdfe
SZ
77\r
78 PageTableEntry = AllocatePages (1);\r
36829e67 79 ASSERT (PageTableEntry != NULL);\r
5997daf7 80\r
5630cdfe
SZ
81 //\r
82 // Fill in 2M page entry.\r
83 //\r
5997daf7 84 *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
5630cdfe
SZ
85\r
86 PhysicalAddress4K = PhysicalAddress;\r
87 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {\r
88 //\r
89 // Fill in the Page Table entries\r
90 //\r
5997daf7 91 PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;\r
5630cdfe
SZ
92 PageTableEntry->Bits.ReadWrite = 1;\r
93 PageTableEntry->Bits.Present = 1;\r
94 if ((PhysicalAddress4K >= StackBase) && (PhysicalAddress4K < StackBase + StackSize)) {\r
95 //\r
96 // Set Nx bit for stack.\r
97 //\r
98 PageTableEntry->Bits.Nx = 1;\r
99 }\r
100 }\r
101}\r
102\r
103/**\r
104 Split 1G page to 2M.\r
105\r
106 @param[in] PhysicalAddress Start physical address the 1G page covered.\r
107 @param[in, out] PageEntry1G Pointer to 1G page entry.\r
108 @param[in] StackBase Stack base address.\r
109 @param[in] StackSize Stack size.\r
110\r
111**/\r
112VOID\r
113Split1GPageTo2M (\r
114 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
115 IN OUT UINT64 *PageEntry1G,\r
116 IN EFI_PHYSICAL_ADDRESS StackBase,\r
117 IN UINTN StackSize\r
118 )\r
119{\r
120 EFI_PHYSICAL_ADDRESS PhysicalAddress2M;\r
121 UINTN IndexOfPageDirectoryEntries;\r
122 PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
5997daf7
LD
123 UINT64 AddressEncMask;\r
124\r
125 //\r
126 // Make sure AddressEncMask is contained to smallest supported address field\r
127 //\r
128 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r
5630cdfe
SZ
129\r
130 PageDirectoryEntry = AllocatePages (1);\r
36829e67 131 ASSERT (PageDirectoryEntry != NULL);\r
5997daf7 132\r
5630cdfe
SZ
133 //\r
134 // Fill in 1G page entry.\r
135 //\r
5997daf7 136 *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;\r
5630cdfe
SZ
137\r
138 PhysicalAddress2M = PhysicalAddress;\r
139 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {\r
140 if ((PhysicalAddress2M < StackBase + StackSize) && ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {\r
141 //\r
142 // Need to split this 2M page that covers stack range.\r
143 //\r
144 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r
145 } else {\r
146 //\r
147 // Fill in the Page Directory entries\r
148 //\r
5997daf7 149 PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;\r
5630cdfe
SZ
150 PageDirectoryEntry->Bits.ReadWrite = 1;\r
151 PageDirectoryEntry->Bits.Present = 1;\r
152 PageDirectoryEntry->Bits.MustBe1 = 1;\r
153 }\r
154 }\r
155}\r
156\r
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  All page-table pages come from one up-front AllocatePages() call and are
  handed out sequentially via BigPageAddress, so the allocation-size math
  below must match the consumption order exactly.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of 4 level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;

  //
  // Make sure AddressEncMask is contained to smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  //
  // Detect 1-GByte page support: CPUID.80000001H:EDX bit 26, but only if
  // the platform opted in via PcdUse1GPageTable.
  //
  Page1GSupport = FALSE;
  if (PcdGetBool(PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported: prefer the CPU HOB, else
  // CPUID.80000008H:EAX[7:0], else assume 36 bits.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  // Only identity-map what a 48-bit linear address can reach.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  // Each PML4 entry covers 512 GB (2^39); each PDP entry covers 1 GB (2^30).
  //
  if (PhysicalAddressBits <= 39 ) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // Pre-allocate big pages to avoid later allocations.
  // Without 1G pages: 1 PML4 page + (1 PDPT page + NumberOfPdpEntriesNeeded
  // PD pages) per PML4 entry. With 1G pages no PD level is needed.
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress        = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entires.
    // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      //
      // With 1G pages the PDPT page just allocated is filled directly with
      // 512 1-GByte leaf entries instead of pointers to page directories.
      //
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_1GB) > StackBase)) {
          //
          // This 1G page overlaps the stack; split it so stack pages can be
          // marked non-executable.
          //
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entries points to a page of Page Directory entires.
        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entries
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_2MB) > StackBase)) {
            //
            // Need to split this 2M page that covers stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      //
      // Zero the unused tail of the PDPT page so no stale entries are present.
      //
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // Enable EFER.NXE so the Nx bits set above take effect.
  //
  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
353\r