/** @file
  x64 Virtual Memory Management Services in the form of an IA-32 driver.
  Used to establish a 1:1 Virtual to Physical Mapping that is required to
  enter Long Mode (x64 64-bit mode).

  While we make a 1:1 mapping (identity mapping) for all physical pages
  we still need to use the MTRRs to ensure that the cacheability attributes
  for all memory regions are correct.

  The basic idea is to use 2MB page table entries wherever possible. If
  more granularity of cacheability is required then 4K page tables are used.

  References:
    1) IA-32 Intel(R) Architecture Software Developer's Manual Volume 1: Basic Architecture, Intel
    2) IA-32 Intel(R) Architecture Software Developer's Manual Volume 2: Instruction Set Reference, Intel
    3) IA-32 Intel(R) Architecture Software Developer's Manual Volume 3: System Programmer's Guide, Intel

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
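
//
// For reference (Intel SDM Vol. 3A, 4-level paging): every paging structure
// below is a single 4 KB page holding 512 8-byte entries, so each level maps:
//   PML4 entry -> 512 GB region (points to a Page Directory Pointer table)
//   PDP entry  -> 1 GB          (a 1 GB page, or points to a Page Directory)
//   PD entry   -> 2 MB          (a 2 MB page, or points to a Page Table)
//   PT entry   -> 4 KB page
//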

#include "DxeIpl.h"
#include "VirtualMemory.h"


/**
  Enable Execute Disable Bit.

**/
VOID
EnableExecuteDisableBit (
  VOID
  )
{
  UINT64           MsrRegisters;

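  //
  // MSR 0xC0000080 is the extended feature enable register (IA32_EFER);
  // BIT11 is its No-Execute Enable (NXE) flag, which must be set before
  // the execute-disable (XD/NX) bit in page table entries takes effect.
  //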
  MsrRegisters = AsmReadMsr64 (0xC0000080);
  MsrRegisters |= BIT11;
  AsmWriteMsr64 (0xC0000080, MsrRegisters);
}

/**
  Split 2M page to 4K.

  @param[in]      PhysicalAddress   Start physical address that the 2M page covers.
  @param[in, out] PageEntry2M       Pointer to 2M page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.

**/
VOID
Split2MPageTo4K (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry2M,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress4K;
  UINTN                                 IndexOfPageTableEntries;
  PAGE_TABLE_4K_ENTRY                   *PageTableEntry;
  UINT64                                AddressEncMask;

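  //
  // PcdPteMemoryEncryptionAddressOrMask supplies an attribute mask (for
  // example, the AMD SEV memory encryption bit) that is OR'd into every
  // page table entry built by this driver.
  //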
  //
  // Make sure AddressEncMask is confined to the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageTableEntry = AllocatePages (1);
  ASSERT (PageTableEntry != NULL);

  //
  // Fill in 2M page entry.
  //
  *PageEntry2M = (UINT64) (UINTN) PageTableEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

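  //
  // One page table holds 512 8-byte entries, and 512 * 4 KB = 2 MB, so the
  // 4 KB entries below cover exactly the region the 2 MB entry used to map.
  //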
  PhysicalAddress4K = PhysicalAddress;
  for (IndexOfPageTableEntries = 0; IndexOfPageTableEntries < 512; IndexOfPageTableEntries++, PageTableEntry++, PhysicalAddress4K += SIZE_4KB) {
    //
    // Fill in the Page Table entries
    //
    PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
    PageTableEntry->Bits.ReadWrite = 1;
    PageTableEntry->Bits.Present = 1;
    if ((PhysicalAddress4K >= StackBase) && (PhysicalAddress4K < StackBase + StackSize)) {
      //
      // Set Nx bit for stack.
      //
      PageTableEntry->Bits.Nx = 1;
    }
  }
}

/**
  Split 1G page to 2M.

  @param[in]      PhysicalAddress   Start physical address that the 1G page covers.
  @param[in, out] PageEntry1G       Pointer to 1G page entry.
  @param[in]      StackBase         Stack base address.
  @param[in]      StackSize         Stack size.

**/
VOID
Split1GPageTo2M (
  IN EFI_PHYSICAL_ADDRESS               PhysicalAddress,
  IN OUT UINT64                         *PageEntry1G,
  IN EFI_PHYSICAL_ADDRESS               StackBase,
  IN UINTN                              StackSize
  )
{
  EFI_PHYSICAL_ADDRESS                  PhysicalAddress2M;
  UINTN                                 IndexOfPageDirectoryEntries;
  PAGE_TABLE_ENTRY                      *PageDirectoryEntry;
  UINT64                                AddressEncMask;

  //
  // Make sure AddressEncMask is confined to the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PageDirectoryEntry = AllocatePages (1);
  ASSERT (PageDirectoryEntry != NULL);

  //
  // Fill in 1G page entry.
  //
  *PageEntry1G = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask | IA32_PG_P | IA32_PG_RW;

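  //
  // 512 entries * 2 MB = 1 GB, so the 2 MB entries below cover exactly the
  // region the 1 GB entry used to map. Any 2 MB slot that overlaps the stack
  // is split further into 4 KB pages so the stack alone can be marked NX.
  //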
  PhysicalAddress2M = PhysicalAddress;
  for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress2M += SIZE_2MB) {
    if ((PhysicalAddress2M < StackBase + StackSize) && ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {
      //
      // Need to split this 2M page that covers the stack range.
      //
      Split2MPageTo4K (PhysicalAddress2M, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
    } else {
      //
      // Fill in the Page Directory entries
      //
      PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
      PageDirectoryEntry->Bits.ReadWrite = 1;
      PageDirectoryEntry->Bits.Present = 1;
      PageDirectoryEntry->Bits.MustBe1 = 1;
    }
  }
}

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 1:1 Virtual to Physical mapping.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of the 4-level page map.

**/
UINTN
CreateIdentityMappingPageTables (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT32                                        RegEax;
  UINT32                                        RegEdx;
  UINT8                                         PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS                          PageAddress;
  UINTN                                         IndexOfPml4Entries;
  UINTN                                         IndexOfPdpEntries;
  UINTN                                         IndexOfPageDirectoryEntries;
  UINT32                                        NumberOfPml4EntriesNeeded;
  UINT32                                        NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMapLevel4Entry;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER                *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                              *PageDirectoryEntry;
  UINTN                                         TotalPagesNum;
  UINTN                                         BigPageAddress;
  VOID                                          *Hob;
  BOOLEAN                                       Page1GSupport;
  PAGE_TABLE_1G_ENTRY                           *PageDirectory1GEntry;
  UINT64                                        AddressEncMask;

  //
  // Make sure AddressEncMask is confined to the smallest supported address field.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

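  //
  // 1 GB pages are used only when the platform opts in through
  // PcdUse1GPageTable and the processor reports support for them
  // (CPUID leaf 80000001h, EDX bit 26).
  //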
  Page1GSupport = FALSE;
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  //
  // Get physical address bits supported.
  //
  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
  if (Hob != NULL) {
    PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
  } else {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000008) {
      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
      PhysicalAddressBits = (UINT8) RegEax;
    } else {
      PhysicalAddressBits = 36;
    }
  }

  //
  // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
  //
  ASSERT (PhysicalAddressBits <= 52);
  if (PhysicalAddressBits > 48) {
    PhysicalAddressBits = 48;
  }
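  //
  // The clamp above reflects that 4-level paging only provides 48-bit linear
  // addresses; an identity mapping therefore cannot usefully cover physical
  // addresses at or above 1 << 48 even if the CPU implements more address bits.
  //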

  //
  // Calculate the table entries needed.
  //
  if (PhysicalAddressBits <= 39) {
    NumberOfPml4EntriesNeeded = 1;
    NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 30));
  } else {
    NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (PhysicalAddressBits - 39));
    NumberOfPdpEntriesNeeded = 512;
  }
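  //
  // Each PDP entry maps 1 GB (2^30) and each PML4 entry maps 512 GB (2^39).
  // For example, 36 address bits need 1 PML4 entry and 2^(36-30) = 64 PDP
  // entries, while the full 48 bits need 2^(48-39) = 512 PML4 entries, each
  // with a fully populated 512-entry PDP table.
  //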

  //
  // Pre-allocate big pages to avoid later allocations.
  //
  if (!Page1GSupport) {
    TotalPagesNum = (NumberOfPdpEntriesNeeded + 1) * NumberOfPml4EntriesNeeded + 1;
  } else {
    TotalPagesNum = NumberOfPml4EntriesNeeded + 1;
  }
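  //
  // The total is one page for the PML4 itself, one PDP table page per PML4
  // entry, and, when 1 GB pages are unavailable, one Page Directory page per
  // PDP entry. Regions split for the stack are allocated later by separate
  // AllocatePages() calls in Split1GPageTo2M()/Split2MPageTo4K().
  //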
  BigPageAddress = (UINTN) AllocatePages (TotalPagesNum);
  ASSERT (BigPageAddress != 0);

  //
  // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
  //
  PageMap = (VOID *) BigPageAddress;
  BigPageAddress += SIZE_4KB;

  PageMapLevel4Entry = PageMap;
  PageAddress = 0;
  for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    //
    // Each PML4 entry points to a page of Page Directory Pointer entries.
    // So let's allocate space for them and fill them in within the IndexOfPdpEntries loop.
    //
    PageDirectoryPointerEntry = (VOID *) BigPageAddress;
    BigPageAddress += SIZE_4KB;

    //
    // Make a PML4 Entry
    //
    PageMapLevel4Entry->Uint64 = (UINT64)(UINTN)PageDirectoryPointerEntry | AddressEncMask;
    PageMapLevel4Entry->Bits.ReadWrite = 1;
    PageMapLevel4Entry->Bits.Present = 1;

    if (Page1GSupport) {
      PageDirectory1GEntry = (VOID *) PageDirectoryPointerEntry;

      for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
        if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_1GB) > StackBase)) {
          Split1GPageTo2M (PageAddress, (UINT64 *) PageDirectory1GEntry, StackBase, StackSize);
        } else {
          //
          // Fill in the Page Directory entries
          //
          PageDirectory1GEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
          PageDirectory1GEntry->Bits.ReadWrite = 1;
          PageDirectory1GEntry->Bits.Present = 1;
          PageDirectory1GEntry->Bits.MustBe1 = 1;
        }
      }
    } else {
      for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        //
        // Each Page Directory Pointer entry points to a page of Page Directory entries.
        // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
        //
        PageDirectoryEntry = (VOID *) BigPageAddress;
        BigPageAddress += SIZE_4KB;

        //
        // Fill in a Page Directory Pointer entry.
        //
        PageDirectoryPointerEntry->Uint64 = (UINT64)(UINTN)PageDirectoryEntry | AddressEncMask;
        PageDirectoryPointerEntry->Bits.ReadWrite = 1;
        PageDirectoryPointerEntry->Bits.Present = 1;

        for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
          if (PcdGetBool (PcdSetNxForStack) && (PageAddress < StackBase + StackSize) && ((PageAddress + SIZE_2MB) > StackBase)) {
            //
            // Need to split this 2M page that covers the stack range.
            //
            Split2MPageTo4K (PageAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
          } else {
            //
            // Fill in the Page Directory entries
            //
            PageDirectoryEntry->Uint64 = (UINT64)PageAddress | AddressEncMask;
            PageDirectoryEntry->Bits.ReadWrite = 1;
            PageDirectoryEntry->Bits.Present = 1;
            PageDirectoryEntry->Bits.MustBe1 = 1;
          }
        }
      }

      for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
        ZeroMem (
          PageDirectoryPointerEntry,
          sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
          );
      }
    }
  }

  //
  // For the PML4 entries we are not using, fill in null entries.
  //
  for (; IndexOfPml4Entries < 512; IndexOfPml4Entries++, PageMapLevel4Entry++) {
    ZeroMem (
      PageMapLevel4Entry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  if (PcdGetBool (PcdSetNxForStack)) {
    EnableExecuteDisableBit ();
  }

  return (UINTN)PageMap;
}

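//
// For illustration only (not part of this file): in DxeIpl's hand-off path
// the address returned above is loaded into CR3 before the switch to long
// mode, roughly as sketched below. The locals and selector are paraphrased
// from memory; see Ia32/DxeLoadFunc.c for the actual code.
//
//   PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE);
//   AsmWriteCr3 (PageTables);
//   AsmEnablePaging64 (
//     SYS_CODE64_SEL,
//     DxeCoreEntryPoint,
//     (UINT64) (UINTN) HobList.Raw,
//     0,
//     TopOfStack
//     );
//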