/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages,
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granular cacheability control is required, then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "DxeIpl.h"
#include "VirtualMemory.h"

/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64  MsrRegisters;

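  //
  // MSR 0xC0000080 is IA32_EFER; BIT11 is the NXE (No-Execute Enable) flag,
  // which must be set for the XD/NX bit in the page table entries built by
  // this module to take effect.
  //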
  MsrRegisters = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress   Start physical address that the 2M page covers.
  @param[in, out] PageEntry2M       Pointer to 2M page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress4K;
  UINTN                                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY                   *PageTableEntry;

  PageTableEntry = AllocatePages (1);
  ASSERT (PageTableEntry != NULL);
  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64) (UINTN) PageTableEntry | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K;
    PageTableEntry->Bits.ReadWrite = 1;
    PageTableEntry->Bits.Present = 1;
    if ((PhysicalAddress4K >= StackBase) && (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress   Start physical address that the 1G page covers.
  @param[in, out] PageEntry1G       Pointer to 1G page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress2M;
  UINTN                                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY                      *PageDirectoryEntry;

  PageDirectoryEntry = AllocatePages (1);
  ASSERT (PageDirectoryEntry != NULL);
  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | IA32_PG_P | IA32_PG_RW;

  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if ((PhysicalAddress2M < StackBase + StackSize) && ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {
      //
      // Need to split this 2M page that covers the stack range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present = 1;
      PageDirectoryEntry->Bits.MustBe1 = 1;
    }
  }
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase   Stack base address.
  @param[in] StackSize   Stack size.

  @return The address of the 4-level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;

  Page1GSupport = FALSE;
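  //
  // 1GB pages are used only when the platform opts in through
  // PcdUse1GPageTable and the processor advertises support in CPUID leaf
  // 80000001h, EDX bit 26 (Page1GB).
  //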
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical
  // addresses. Since this identity mapping equates linear and physical
  // addresses, at most the low 48 bits can be mapped.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  //
  if (PhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32) LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }
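
  //
  // Worked example: with 36 physical address bits this yields 1 PML4 entry and
  // 2^(36-30) = 64 PDPT entries (64 x 1GB); with 48 bits it yields
  // 2^(48-39) = 512 PML4 entries, each with a full 512-entry PDPT.
  //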

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
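
  //
  // Worked example (no 1GB pages, 36 physical address bits): (64 + 1) * 1 + 1
  // = 66 pages: one PML4 page, one PDPT page, and 64 page directory pages.
  // Pages needed to split 2MB entries covering the stack are allocated
  // separately inside Split1GPageTo2M()/Split2MPageTo4K().
  //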
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists, so let's allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    // So let's allocate space for them and fill them in during the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

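    //
    // Each PML4 entry covers 512GB. With 1GB pages the PDPT entries below map
    // memory directly; otherwise each PDPT entry points to a page directory
    // of 2MB entries.
    //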
    if (Page1GSupport) {
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_1GB) > StackBase)) {
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Page Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in during the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entry
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_2MB) > StackBase)) {
            //
            // Need to split this 2M page that covers the stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      //
      // For the PDP entries we are not using, fill in a null entry.
      //
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using, fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

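  //
  // The Nx bits set on the stack pages above only take effect once the NXE
  // flag is enabled in IA32_EFER, so do that now when the platform requests it.
  //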
  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
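
//
// Usage sketch (illustrative, not part of this file): DxeIpl's hand-off code
// builds these tables for the DXE core stack and then activates them by
// loading CR3, e.g. with BaseLib's AsmWriteCr3():
//
//   PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE);
//   AsmWriteCr3 (PageTables);
//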