]> git.proxmox.com Git - mirror_edk2.git/blame_incremental - MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
MdeModulePkg DxeIpl: Add stack NX support
[mirror_edk2.git] / MdeModulePkg / Core / DxeIplPeim / X64 / VirtualMemory.c
... / ...
CommitLineData
1/** @file\r
2 x64 Virtual Memory Management Services in the form of an IA-32 driver. \r
3 Used to establish a 1:1 Virtual to Physical Mapping that is required to\r
4 enter Long Mode (x64 64-bit mode).\r
5\r
6 While we make a 1:1 mapping (identity mapping) for all physical pages \r
7 we still need to use the MTRR's to ensure that the cachability attributes\r
8 for all memory regions is correct.\r
9\r
10 The basic idea is to use 2MB page table entries where ever possible. If\r
11 more granularity of cachability is required then 4K page tables are used.\r
12\r
13 References:\r
14 1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1:Basic Architecture, Intel\r
15 2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2:Instruction Set Reference, Intel\r
16 3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3:System Programmer's Guide, Intel\r
17\r
18Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>\r
19This program and the accompanying materials\r
20are licensed and made available under the terms and conditions of the BSD License\r
21which accompanies this distribution. The full text of the license may be found at\r
22http://opensource.org/licenses/bsd-license.php\r
23\r
24THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
25WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
26\r
27**/ \r
28\r
29#include "DxeIpl.h"\r
30#include "VirtualMemory.h"\r
31\r
32/**\r
33 Enable Execute Disable Bit.\r
34\r
35**/\r
36VOID\r
37EnableExecuteDisableBit (\r
38 VOID\r
39 )\r
40{\r
41 UINT64 MsrRegisters;\r
42\r
43 MsrRegisters = AsmReadMsr64 (0xC0000080);\r
44 MsrRegisters |= BIT11;\r
45 AsmWriteMsr64 (0xC0000080, MsrRegisters);\r
46}\r
47\r
48/**\r
49 Split 2M page to 4K.\r
50\r
51 @param[in] PhysicalAddress Start physical address the 2M page covered.\r
52 @param[in, out] PageEntry2M Pointer to 2M page entry.\r
53 @param[in] StackBase Stack base address.\r
54 @param[in] StackSize Stack size.\r
55\r
56**/\r
57VOID\r
58Split2MPageTo4K (\r
59 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
60 IN OUT UINT64 *PageEntry2M,\r
61 IN EFI_PHYSICAL_ADDRESS StackBase,\r
62 IN UINTN StackSize\r
63 )\r
64{\r
65 EFI_PHYSICAL_ADDRESS PhysicalAddress4K;\r
66 UINTN IndexOfPageTableEntries;\r
67 PAGE_TABLE_4K_ENTRY *PageTableEntry;\r
68\r
69 PageTableEntry = AllocatePages (1);\r
70 //\r
71 // Fill in 2M page entry.\r
72 //\r
73 *PageEntry2M = (UINT64) (UINTN) PageTableEntry | IA32_PG_P | IA32_PG_RW;\r
74\r
75 PhysicalAddress4K = PhysicalAddress;\r
76 for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {\r
77 //\r
78 // Fill in the Page Table entries\r
79 //\r
80 PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K;\r
81 PageTableEntry->Bits.ReadWrite = 1;\r
82 PageTableEntry->Bits.Present = 1;\r
83 if ((PhysicalAddress4K >= StackBase) && (PhysicalAddress4K < StackBase + StackSize)) {\r
84 //\r
85 // Set Nx bit for stack.\r
86 //\r
87 PageTableEntry->Bits.Nx = 1;\r
88 }\r
89 }\r
90}\r
91\r
92/**\r
93 Split 1G page to 2M.\r
94\r
95 @param[in] PhysicalAddress Start physical address the 1G page covered.\r
96 @param[in, out] PageEntry1G Pointer to 1G page entry.\r
97 @param[in] StackBase Stack base address.\r
98 @param[in] StackSize Stack size.\r
99\r
100**/\r
101VOID\r
102Split1GPageTo2M (\r
103 IN EFI_PHYSICAL_ADDRESS PhysicalAddress,\r
104 IN OUT UINT64 *PageEntry1G,\r
105 IN EFI_PHYSICAL_ADDRESS StackBase,\r
106 IN UINTN StackSize\r
107 )\r
108{\r
109 EFI_PHYSICAL_ADDRESS PhysicalAddress2M;\r
110 UINTN IndexOfPageDirectoryEntries;\r
111 PAGE_TABLE_ENTRY *PageDirectoryEntry;\r
112\r
113 PageDirectoryEntry = AllocatePages (1);\r
114 //\r
115 // Fill in 1G page entry.\r
116 //\r
117 *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | IA32_PG_P | IA32_PG_RW;\r
118\r
119 PhysicalAddress2M = PhysicalAddress;\r
120 for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {\r
121 if ((PhysicalAddress2M < StackBase + StackSize) && ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {\r
122 //\r
123 // Need to split this 2M page that covers stack range.\r
124 //\r
125 Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);\r
126 } else {\r
127 //\r
128 // Fill in the Page Directory entries\r
129 //\r
130 PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M;\r
131 PageDirectoryEntry->Bits.ReadWrite = 1;\r
132 PageDirectoryEntry->Bits.Present = 1;\r
133 PageDirectoryEntry->Bits.MustBe1 = 1;\r
134 }\r
135 }\r
136}\r
137\r
/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  All page-table pages are carved out of one up-front AllocatePages call
  (a bump allocator via BigPageAddress), sized from the number of PML4
  and PDP entries needed for the supported physical address width. Leaf
  mappings use 1GB pages when the CPU supports them (and the PCD allows
  it), otherwise 2MB pages; any leaf that overlaps the stack is split
  down so the stack can be marked NX when PcdSetNxForStack is set.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of 4 level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{ 
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;

  //
  // Detect 1GB page support: CPUID leaf 0x80000001, EDX bit 26
  // (Page1GB), only consulted when PcdUse1GPageTable allows it.
  //
  Page1GSupport = FALSE;
  if (PcdGetBool(PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  // Prefer the CPU HOB published by platform PEIMs; otherwise query
  // CPUID leaf 0x80000008 (EAX[7:0] = physical address width), falling
  // back to 36 bits when that leaf is unavailable.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  // Since the identity mapping equates linear and physical addresses,
  // cap the mapped range at the 48-bit linear address limit.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }

  //
  // Calculate the table entries needed.
  // <= 39 bits fits in one PML4 entry (each covers 512GB = 2^39), with
  // one PDP entry per 1GB (2^30); above that, one PML4 entry per 512GB
  // with all 512 PDP entries populated.
  //
  if (PhysicalAddressBits <= 39 ) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }

  //
  // Pre-allocate big pages to avoid later allocations. 
  // Without 1GB pages: 1 PML4 page + 1 PDP page per PML4 entry + 1 PD
  // page per PDP entry. With 1GB pages the PDP pages hold the leaves,
  // so only 1 PML4 page + 1 PDP page per PML4 entry are needed.
  // NOTE(review): pages consumed by Split*To4K/2M for the stack come
  // from separate AllocatePages calls, not from this pool.
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
  // BigPageAddress acts as a bump pointer into the pre-allocated pool.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entires.
    // So lets allocate space for them and fill them in in the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      //
      // The PDP page itself holds 512 1GB leaf entries; split any entry
      // overlapping the stack when NX protection is requested.
      //
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;
 
      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_1GB) > StackBase)) {
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Directory Pointer entries points to a page of Page Directory entires.
        // So allocate space for them and fill them in in the IndexOfPageDirectoryEntries loop.
        // 
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer Entries
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_2MB) > StackBase)) {
            //
            // Need to split this 2M page that covers stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      //
      // Zero the remaining PDP entries so no stale pool contents are
      // interpreted as present mappings.
      //
      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof(PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using fill in a null entry.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // Turn on IA32_EFER.NXE so the Nx bits set above take effect.
  //
  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}
328\r