/** @file
  Ia32-specific functionality for DxeLoad.

Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "DxeIpl.h"
#include "VirtualMemory.h"

#define IDT_ENTRY_COUNT  32

typedef struct _X64_IDT_TABLE {
  //
  // Reserved 4 bytes preceding PeiService and IdtTable,
  // since the IDT base address should be 8-byte aligned.
  //
  UINT32                   Reserved;
  CONST EFI_PEI_SERVICES   **PeiService;
  X64_IDT_GATE_DESCRIPTOR  IdtTable[IDT_ENTRY_COUNT];
} X64_IDT_TABLE;

//
// Global Descriptor Table (GDT)
//
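// Each initializer lists the segment descriptor bit-fields in architectural
// order: limit 15:0, base 15:0, base 23:16, type, S, DPL, P, limit 19:16,
// AVL, L, D/B, G, base 31:24. The entry at selector 0x38 is the only one
// with the L bit set and serves as the long-mode code segment
// (SYS_CODE64_SEL) used when jumping to the x64 DxeCore.
//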
GLOBAL_REMOVE_IF_UNREFERENCED IA32_GDT gGdtEntries[] = {
/* selector { Global Segment Descriptor } */
/* 0x00 */  {{0,      0,  0,  0,    0,  0,  0,  0,    0,  0,  0,  0,  0}}, //null descriptor
/* 0x08 */  {{0xffff, 0,  0,  0x2,  1,  0,  1,  0xf,  0,  0,  1,  1,  0}}, //linear data segment descriptor
/* 0x10 */  {{0xffff, 0,  0,  0xf,  1,  0,  1,  0xf,  0,  0,  1,  1,  0}}, //linear code segment descriptor
/* 0x18 */  {{0xffff, 0,  0,  0x3,  1,  0,  1,  0xf,  0,  0,  1,  1,  0}}, //system data segment descriptor
/* 0x20 */  {{0xffff, 0,  0,  0xa,  1,  0,  1,  0xf,  0,  0,  1,  1,  0}}, //system code segment descriptor
/* 0x28 */  {{0,      0,  0,  0,    0,  0,  0,  0,    0,  0,  0,  0,  0}}, //spare segment descriptor
/* 0x30 */  {{0xffff, 0,  0,  0x2,  1,  0,  1,  0xf,  0,  0,  1,  1,  0}}, //system data segment descriptor
/* 0x38 */  {{0xffff, 0,  0,  0xa,  1,  0,  1,  0xf,  0,  1,  0,  1,  0}}, //system code segment descriptor
/* 0x40 */  {{0,      0,  0,  0,    0,  0,  0,  0,    0,  0,  0,  0,  0}}, //spare segment descriptor
};

//
// IA32 Gdt register
//
GLOBAL_REMOVE_IF_UNREFERENCED CONST IA32_DESCRIPTOR gGdt = {
  sizeof (gGdtEntries) - 1,
  (UINTN) gGdtEntries
};

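//
// IDT register value used when entering long mode: the limit covers
// IDT_ENTRY_COUNT 16-byte x64 gate descriptors, and Base is filled in at
// run time once the IDT has been allocated and populated in HandOffToDxeCore().
//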
GLOBAL_REMOVE_IF_UNREFERENCED IA32_DESCRIPTOR gLidtDescriptor = {
  sizeof (X64_IDT_GATE_DESCRIPTOR) * IDT_ENTRY_COUNT - 1,
  0
};

/**
  Allocates and fills in the Page Directory and Page Table Entries to
  establish a 4G page table.

  @param[in] StackBase  Stack base address.
  @param[in] StackSize  Stack size.

  @return The address of page table.

**/
UINTN
Create4GPageTablesIa32Pae (
  IN EFI_PHYSICAL_ADDRESS   StackBase,
  IN UINTN                  StackSize
  )
{
  UINT8                           PhysicalAddressBits;
  EFI_PHYSICAL_ADDRESS            PhysicalAddress;
  UINTN                           IndexOfPdpEntries;
  UINTN                           IndexOfPageDirectoryEntries;
  UINT32                          NumberOfPdpEntriesNeeded;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageMap;
  PAGE_MAP_AND_DIRECTORY_POINTER  *PageDirectoryPointerEntry;
  PAGE_TABLE_ENTRY                *PageDirectoryEntry;
  UINTN                           TotalPagesNum;
  UINTN                           PageAddress;
  UINT64                          AddressEncMask;

  //
  // Make sure AddressEncMask is contained within the smallest supported address field
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;

  PhysicalAddressBits = 32;

  //
  // Calculate the table entries needed.
  //
  NumberOfPdpEntriesNeeded = (UINT32) LShiftU64 (1, (PhysicalAddressBits - 30));

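  //
  // One page for the PDPT itself plus one page of Page Directory entries for
  // each of the 2^(32 - 30) = 4 PDPT entries that cover the 4 GiB address space.
  //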
  TotalPagesNum = NumberOfPdpEntriesNeeded + 1;
  PageAddress = (UINTN) AllocatePageTableMemory (TotalPagesNum);
  ASSERT (PageAddress != 0);

  PageMap = (VOID *) PageAddress;
  PageAddress += SIZE_4KB;

  PageDirectoryPointerEntry = PageMap;
  PhysicalAddress = 0;

  for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
    //
    // Each Page Directory Pointer entry points to a page of Page Directory entries.
    // Allocate space for them here and fill them in within the IndexOfPageDirectoryEntries loop.
    //
    PageDirectoryEntry = (VOID *) PageAddress;
    PageAddress += SIZE_4KB;

    //
    // Fill in a Page Directory Pointer Entry
    //
    PageDirectoryPointerEntry->Uint64 = (UINT64) (UINTN) PageDirectoryEntry | AddressEncMask;
    PageDirectoryPointerEntry->Bits.Present = 1;

    for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PhysicalAddress += SIZE_2MB) {
      if ((IsNullDetectionEnabled () && PhysicalAddress == 0)
          || ((PhysicalAddress < StackBase + StackSize)
              && ((PhysicalAddress + SIZE_2MB) > StackBase))) {
        //
        // Need to split this 2M page that covers the stack range.
        //
        Split2MPageTo4K (PhysicalAddress, (UINT64 *) PageDirectoryEntry, StackBase, StackSize);
      } else {
        //
        // Fill in the Page Directory entries
        //
        PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress | AddressEncMask;
        PageDirectoryEntry->Bits.ReadWrite = 1;
        PageDirectoryEntry->Bits.Present = 1;
        PageDirectoryEntry->Bits.MustBe1 = 1;
      }
    }
  }

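  //
  // Zero out the remaining entries of the 4 KB PDPT page so that anything
  // beyond the mapped 4 GiB range is marked not-present.
  //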
  for (; IndexOfPdpEntries < 512; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
    ZeroMem (
      PageDirectoryPointerEntry,
      sizeof (PAGE_MAP_AND_DIRECTORY_POINTER)
      );
  }

  //
  // Protect the page table by marking the memory used for page table to be
  // read-only.
  //
  EnablePageTableProtection ((UINTN)PageMap, FALSE);

  return (UINTN) PageMap;
}

/**
  The function will check if IA32 PAE is supported.

  @retval TRUE   IA32 PAE is supported.
  @retval FALSE  IA32 PAE is not supported.

**/
BOOLEAN
IsIa32PaeSupport (
  VOID
  )
{
  UINT32   RegEax;
  UINT32   RegEdx;
  BOOLEAN  Ia32PaeSupport;

  Ia32PaeSupport = FALSE;
  AsmCpuid (0x0, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x1) {
    AsmCpuid (0x1, NULL, NULL, NULL, &RegEdx);
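    //
    // Bit 6 of CPUID.01h:EDX reports PAE support.
    //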
    if ((RegEdx & BIT6) != 0) {
      Ia32PaeSupport = TRUE;
    }
  }

  return Ia32PaeSupport;
}

/**
  The function will check if Execute Disable Bit is available.

  @retval TRUE   Execute Disable Bit is available.
  @retval FALSE  Execute Disable Bit is not available.

**/
BOOLEAN
IsExecuteDisableBitAvailable (
  VOID
  )
{
  UINT32   RegEax;
  UINT32   RegEdx;
  BOOLEAN  Available;

  Available = FALSE;
  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & BIT20) != 0) {
      //
      // Bit 20: Execute Disable Bit available.
      //
      Available = TRUE;
    }
  }

  return Available;
}

/**
  The function will check if the page table should be set up or not.

  @retval TRUE   Page table should be created.
  @retval FALSE  Page table should not be created.

**/
BOOLEAN
ToBuildPageTable (
  VOID
  )
{
  if (!IsIa32PaeSupport ()) {
    return FALSE;
  }

  if (IsNullDetectionEnabled ()) {
    return TRUE;
  }

  if (PcdGet8 (PcdHeapGuardPropertyMask) != 0) {
    return TRUE;
  }

  if (PcdGetBool (PcdCpuStackGuard)) {
    return TRUE;
  }

  if (PcdGetBool (PcdSetNxForStack) && IsExecuteDisableBitAvailable ()) {
    return TRUE;
  }

  return FALSE;
}

/**
  Transfers control to DxeCore.

  This function performs CPU architecture specific operations to execute
  the entry point of DxeCore with the parameters of HobList.
  It also installs EFI_END_OF_PEI_PPI to signal the end of the PEI phase.

  @param DxeCoreEntryPoint  The entry point of DxeCore.
  @param HobList            The start of HobList passed to DxeCore.

**/
VOID
HandOffToDxeCore (
  IN EFI_PHYSICAL_ADDRESS   DxeCoreEntryPoint,
  IN EFI_PEI_HOB_POINTERS   HobList
  )
{
  EFI_STATUS                       Status;
  EFI_PHYSICAL_ADDRESS             BaseOfStack;
  EFI_PHYSICAL_ADDRESS             TopOfStack;
  UINTN                            PageTables;
  X64_IDT_GATE_DESCRIPTOR          *IdtTable;
  UINTN                            SizeOfTemplate;
  VOID                             *TemplateBase;
  EFI_PHYSICAL_ADDRESS             VectorAddress;
  UINT32                           Index;
  X64_IDT_TABLE                    *IdtTableForX64;
  EFI_VECTOR_HANDOFF_INFO          *VectorInfo;
  EFI_PEI_VECTOR_HANDOFF_INFO_PPI  *VectorHandoffInfoPpi;
  BOOLEAN                          BuildPageTablesIa32Pae;

  if (IsNullDetectionEnabled ()) {
    ClearFirst4KPage (HobList.Raw);
  }

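  //
  // Allocate the stack that DxeCore will run on from boot-services data memory.
  //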
  Status = PeiServicesAllocatePages (EfiBootServicesData, EFI_SIZE_TO_PAGES (STACK_SIZE), &BaseOfStack);
  ASSERT_EFI_ERROR (Status);

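  //
  // PcdDxeIplSwitchToLongMode is set when this 32-bit DxeIpl launches a 64-bit
  // DxeCore; in that case build the long-mode GDT, page tables and IDT and
  // switch to long mode before handing off.
  //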
  if (FeaturePcdGet(PcdDxeIplSwitchToLongMode)) {
    //
    // Compute the top of the stack we were allocated, which is used to load the x64 DxeCore.
    // Pre-allocate 32 bytes, which conforms to the x64 calling convention.
    //
    // The first four parameters to a function are passed in rcx, rdx, r8 and r9.
    // Any further parameters are pushed on the stack. Furthermore, space (4 * 8 bytes) for the
    // register parameters is reserved on the stack, in case the called function
    // wants to spill them; this is important if the function is variadic.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - 32;

    //
    // The x64 calling convention requires that the stack be aligned to 16 bytes.
    //
    TopOfStack = (EFI_PHYSICAL_ADDRESS) (UINTN) ALIGN_POINTER (TopOfStack, 16);

    //
    // Load the GDT of Go64. Since the GDT of the 32-bit Tiano is located in BS_DATA
    // memory, it may be corrupted when copying the FV to high-end memory.
    //
    AsmWriteGdtr (&gGdt);
    //
    // Create page table and save PageMapLevel4 to CR3
    //
    PageTables = CreateIdentityMappingPageTables (BaseOfStack, STACK_SIZE);

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    //
    // Paging might already be enabled. To avoid conflicting configuration,
    // disable paging first in any case.
    //
    AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
    AsmWriteCr3 (PageTables);

    //
    // Update the contents of the BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    SizeOfTemplate = AsmGetVectorTemplatInfo (&TemplateBase);

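    //
    // Allocate a single buffer that holds the X64_IDT_TABLE followed by
    // IDT_ENTRY_COUNT copies of the vector entry stub template.
    //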
    Status = PeiServicesAllocatePages (
               EfiBootServicesData,
               EFI_SIZE_TO_PAGES(sizeof (X64_IDT_TABLE) + SizeOfTemplate * IDT_ENTRY_COUNT),
               &VectorAddress
               );
    ASSERT_EFI_ERROR (Status);

    //
    // Store the EFI_PEI_SERVICES** in the 4 bytes immediately preceding the IDT so that
    // it can still be retrieved correctly after the IDT register has been re-written.
    //
    IdtTableForX64 = (X64_IDT_TABLE *) (UINTN) VectorAddress;
    IdtTableForX64->PeiService = GetPeiServicesTablePointer ();

    VectorAddress = (EFI_PHYSICAL_ADDRESS) (UINTN) (IdtTableForX64 + 1);
    IdtTable = IdtTableForX64->IdtTable;
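    //
    // Build an x64 interrupt gate (type 0x8E: present, DPL 0) for each vector.
    // The 64-bit handler address is split across OffsetLow, OffsetHigh and
    // Offset32To63, and each vector gets its own copy of the stub template
    // fixed up with its vector number.
    //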
    for (Index = 0; Index < IDT_ENTRY_COUNT; Index++) {
      IdtTable[Index].Ia32IdtEntry.Bits.GateType    = 0x8e;
      IdtTable[Index].Ia32IdtEntry.Bits.Reserved_0  = 0;
      IdtTable[Index].Ia32IdtEntry.Bits.Selector    = SYS_CODE64_SEL;

      IdtTable[Index].Ia32IdtEntry.Bits.OffsetLow   = (UINT16) VectorAddress;
      IdtTable[Index].Ia32IdtEntry.Bits.OffsetHigh  = (UINT16) (RShiftU64 (VectorAddress, 16));
      IdtTable[Index].Offset32To63                  = (UINT32) (RShiftU64 (VectorAddress, 32));
      IdtTable[Index].Reserved                      = 0;

      CopyMem ((VOID *) (UINTN) VectorAddress, TemplateBase, SizeOfTemplate);
      AsmVectorFixup ((VOID *) (UINTN) VectorAddress, (UINT8) Index);

      VectorAddress += SizeOfTemplate;
    }

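    //
    // Record the base of the newly built IDT in the pre-initialized LIDT descriptor.
    //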
    gLidtDescriptor.Base = (UINTN) IdtTable;

    //
    // Disable the debug timer interrupt, since the new IDT table cannot handle it.
    //
    SaveAndSetDebugTimerInterrupt (FALSE);

    AsmWriteIdtr (&gLidtDescriptor);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Go to Long Mode and transfer control to DxeCore.
    // Interrupts will not get turned on until the CPU AP is loaded.
    // Call x64 drivers passing in single argument, a pointer to the HOBs.
    //
    AsmEnablePaging64 (
      SYS_CODE64_SEL,
      DxeCoreEntryPoint,
      (EFI_PHYSICAL_ADDRESS)(UINTN)(HobList.Raw),
      0,
      TopOfStack
      );
  } else {
    //
    // Get Vector Hand-off Info PPI and build Guided HOB
    //
    Status = PeiServicesLocatePpi (
               &gEfiVectorHandoffInfoPpiGuid,
               0,
               NULL,
               (VOID **)&VectorHandoffInfoPpi
               );
    if (Status == EFI_SUCCESS) {
      DEBUG ((EFI_D_INFO, "Vector Hand-off Info PPI is gotten, GUIDed HOB is created!\n"));
      VectorInfo = VectorHandoffInfoPpi->Info;
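      //
      // Count the vector entries, including the terminating
      // EFI_VECTOR_HANDOFF_LAST_ENTRY record, so the whole table is copied
      // into the GUIDed HOB.
      //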
      Index = 1;
      while (VectorInfo->Attribute != EFI_VECTOR_HANDOFF_LAST_ENTRY) {
        VectorInfo ++;
        Index ++;
      }
      BuildGuidDataHob (
        &gEfiVectorHandoffInfoPpiGuid,
        VectorHandoffInfoPpi->Info,
        sizeof (EFI_VECTOR_HANDOFF_INFO) * Index
        );
    }

    //
    // Compute the top of the stack we were allocated. Pre-allocate a UINTN
    // for safety.
    //
    TopOfStack = BaseOfStack + EFI_SIZE_TO_PAGES (STACK_SIZE) * EFI_PAGE_SIZE - CPU_STACK_ALIGNMENT;
    TopOfStack = (EFI_PHYSICAL_ADDRESS) (UINTN) ALIGN_POINTER (TopOfStack, CPU_STACK_ALIGNMENT);

    PageTables = 0;
    BuildPageTablesIa32Pae = ToBuildPageTable ();
    if (BuildPageTablesIa32Pae) {
      PageTables = Create4GPageTablesIa32Pae (BaseOfStack, STACK_SIZE);
      if (IsExecuteDisableBitAvailable ()) {
        EnableExecuteDisableBit();
      }
    }

    //
    // End of PEI phase signal
    //
    PERF_EVENT_SIGNAL_BEGIN (gEndOfPeiSignalPpi.Guid);
    Status = PeiServicesInstallPpi (&gEndOfPeiSignalPpi);
    PERF_EVENT_SIGNAL_END (gEndOfPeiSignalPpi.Guid);
    ASSERT_EFI_ERROR (Status);

    if (BuildPageTablesIa32Pae) {
      //
      // Paging might already be enabled. To avoid conflicting configuration,
      // disable paging first in any case.
      //
      AsmWriteCr0 (AsmReadCr0 () & (~BIT31));
      AsmWriteCr3 (PageTables);
      //
      // Set Physical Address Extension (bit 5 of CR4).
      //
      AsmWriteCr4 (AsmReadCr4 () | BIT5);
    }

    //
    // Update the contents of the BSP stack HOB to reflect the real stack info passed to DxeCore.
    //
    UpdateStackHob (BaseOfStack, STACK_SIZE);

    DEBUG ((
      DEBUG_INFO,
      "%a() Stack Base: 0x%lx, Stack Size: 0x%x\n",
      __FUNCTION__,
      BaseOfStack,
      STACK_SIZE
      ));

    //
    // Transfer the control to the entry point of DxeCore.
    //
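    // When PAE page tables were built, AsmEnablePaging32() enables paging and
    // calls DxeCore on the new stack; otherwise SwitchStack() only switches
    // stacks and jumps, leaving paging disabled.
    //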
    if (BuildPageTablesIa32Pae) {
      AsmEnablePaging32 (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *) (UINTN) TopOfStack
        );
    } else {
      SwitchStack (
        (SWITCH_STACK_ENTRY_POINT)(UINTN)DxeCoreEntryPoint,
        HobList.Raw,
        NULL,
        (VOID *) (UINTN) TopOfStack
        );
    }
  }
}