]> git.proxmox.com Git - mirror_edk2.git/blob - MdeModulePkg/Universal/CapsulePei/X64/X64Entry.c
MdeModulePkg: Replace BSD License with BSD+Patent License
[mirror_edk2.git] / MdeModulePkg / Universal / CapsulePei / X64 / X64Entry.c
1 /** @file
2 The X64 entrypoint is used to process capsule in long mode.
3
4 Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include <Library/DebugLib.h>
12 #include <Library/BaseMemoryLib.h>
13 #include <Library/CpuExceptionHandlerLib.h>
14 #include <Library/DebugAgentLib.h>
15 #include "CommonHeader.h"
16
17 #define EXCEPTION_VECTOR_NUMBER 0x22
18
19 #define IA32_PG_P BIT0
20 #define IA32_PG_RW BIT1
21 #define IA32_PG_PS BIT7
22
//
// State shared with the hooked page fault handler. An instance is embedded
// immediately before the IDT (see PAGE_FAULT_IDT_TABLE below), so the handler
// can locate it from the IDTR base address alone.
//
typedef struct _PAGE_FAULT_CONTEXT {
  BOOLEAN Page1GSupport;                             // TRUE when 1GB pages are used for the on-demand mappings
  UINT64 PhyMask;                                    // mask selecting the 4KB-aligned physical-address bits of a PTE
  UINTN PageFaultBuffer;                             // base of the pool of spare pages used as on-demand page tables
  UINTN PageFaultIndex;                              // next spare page to hand out (round-robin over the pool)
  UINT64 AddressEncMask;                             // memory-encryption bit mask OR'd into new PTEs (presumably AMD SEV — supplied by the IA32 caller)
  //
  // Store the uplink information for each page being used.
  //
  UINT64 *PageFaultUplink[EXTRA_PAGE_TABLE_PAGES];   // for each spare page: the page-table entry currently pointing at it, or NULL
  VOID *OriginalHandler;                             // the page fault handler that was installed before hooking
} PAGE_FAULT_CONTEXT;
35
//
// This layout is load-bearing: PageFaultContext sits directly below the IDT
// in memory, so PageFaultHandler() can recover it as
// (Idtr.Base - sizeof (PAGE_FAULT_CONTEXT)).
//
typedef struct _PAGE_FAULT_IDT_TABLE {
  PAGE_FAULT_CONTEXT PageFaultContext;
  IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
} PAGE_FAULT_IDT_TABLE;
40
/**
  Page fault handler entry thunk installed into IDT vector 14.

  Implemented outside this file (presumably in assembly: it is expected to
  save CPU context, invoke PageFaultHandler(), and chain to the original
  handler when that returns non-NULL — confirm against the .nasm source).

**/
VOID
EFIAPI
PageFaultHandlerHook (
  VOID
  );
50
51 /**
52 Hook IDT with our page fault handler so that the on-demand paging works on page fault.
53
54 @param[in, out] IdtEntry Pointer to IDT entry.
55 @param[in, out] PageFaultContext Pointer to page fault context.
56
57 **/
58 VOID
59 HookPageFaultHandler (
60 IN OUT IA32_IDT_GATE_DESCRIPTOR *IdtEntry,
61 IN OUT PAGE_FAULT_CONTEXT *PageFaultContext
62 )
63 {
64 UINT32 RegEax;
65 UINT8 PhysicalAddressBits;
66 UINTN PageFaultHandlerHookAddress;
67
68 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
69 if (RegEax >= 0x80000008) {
70 AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
71 PhysicalAddressBits = (UINT8) RegEax;
72 } else {
73 PhysicalAddressBits = 36;
74 }
75 PageFaultContext->PhyMask = LShiftU64 (1, PhysicalAddressBits) - 1;
76 PageFaultContext->PhyMask &= (1ull << 48) - SIZE_4KB;
77
78 //
79 // Set Page Fault entry to catch >4G access
80 //
81 PageFaultHandlerHookAddress = (UINTN)PageFaultHandlerHook;
82 PageFaultContext->OriginalHandler = (VOID *)(UINTN)(LShiftU64 (IdtEntry->Bits.OffsetUpper, 32) + IdtEntry->Bits.OffsetLow + (IdtEntry->Bits.OffsetHigh << 16));
83 IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
84 IdtEntry->Bits.Selector = (UINT16)AsmReadCs ();
85 IdtEntry->Bits.Reserved_0 = 0;
86 IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
87 IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
88 IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
89 IdtEntry->Bits.Reserved_1 = 0;
90
91 if (PageFaultContext->Page1GSupport) {
92 PageFaultContext->PageFaultBuffer = (UINTN)(AsmReadCr3 () & PageFaultContext->PhyMask) + EFI_PAGES_TO_SIZE(2);
93 }else {
94 PageFaultContext->PageFaultBuffer = (UINTN)(AsmReadCr3 () & PageFaultContext->PhyMask) + EFI_PAGES_TO_SIZE(6);
95 }
96 PageFaultContext->PageFaultIndex = 0;
97 ZeroMem (PageFaultContext->PageFaultUplink, sizeof (PageFaultContext->PageFaultUplink));
98 }
99
100 /**
101 Acquire page for page fault.
102
103 @param[in, out] PageFaultContext Pointer to page fault context.
104 @param[in, out] Uplink Pointer to up page table entry.
105
106 **/
107 VOID
108 AcquirePage (
109 IN OUT PAGE_FAULT_CONTEXT *PageFaultContext,
110 IN OUT UINT64 *Uplink
111 )
112 {
113 UINTN Address;
114 UINT64 AddressEncMask;
115
116 Address = PageFaultContext->PageFaultBuffer + EFI_PAGES_TO_SIZE (PageFaultContext->PageFaultIndex);
117 ZeroMem ((VOID *) Address, EFI_PAGES_TO_SIZE (1));
118
119 AddressEncMask = PageFaultContext->AddressEncMask;
120
121 //
122 // Cut the previous uplink if it exists and wasn't overwritten.
123 //
124 if ((PageFaultContext->PageFaultUplink[PageFaultContext->PageFaultIndex] != NULL) &&
125 ((*PageFaultContext->PageFaultUplink[PageFaultContext->PageFaultIndex] & ~AddressEncMask & PageFaultContext->PhyMask) == Address)) {
126 *PageFaultContext->PageFaultUplink[PageFaultContext->PageFaultIndex] = 0;
127 }
128
129 //
130 // Link & Record the current uplink.
131 //
132 *Uplink = Address | AddressEncMask | IA32_PG_P | IA32_PG_RW;
133 PageFaultContext->PageFaultUplink[PageFaultContext->PageFaultIndex] = Uplink;
134
135 PageFaultContext->PageFaultIndex = (PageFaultContext->PageFaultIndex + 1) % EXTRA_PAGE_TABLE_PAGES;
136 }
137
138 /**
139 The page fault handler that on-demand read >4G memory/MMIO.
140
141 @retval NULL The page fault is correctly handled.
142 @retval OriginalHandler The page fault is not handled and is passed through to original handler.
143
144 **/
145 VOID *
146 EFIAPI
147 PageFaultHandler (
148 VOID
149 )
150 {
151 IA32_DESCRIPTOR Idtr;
152 PAGE_FAULT_CONTEXT *PageFaultContext;
153 UINT64 PhyMask;
154 UINT64 *PageTable;
155 UINT64 PFAddress;
156 UINTN PTIndex;
157 UINT64 AddressEncMask;
158
159 //
160 // Get the IDT Descriptor.
161 //
162 AsmReadIdtr ((IA32_DESCRIPTOR *) &Idtr);
163 //
164 // Then get page fault context by IDT Descriptor.
165 //
166 PageFaultContext = (PAGE_FAULT_CONTEXT *) (UINTN) (Idtr.Base - sizeof (PAGE_FAULT_CONTEXT));
167 PhyMask = PageFaultContext->PhyMask;
168 AddressEncMask = PageFaultContext->AddressEncMask;
169
170 PFAddress = AsmReadCr2 ();
171 DEBUG ((EFI_D_ERROR, "CapsuleX64 - PageFaultHandler: Cr2 - %lx\n", PFAddress));
172
173 if (PFAddress >= PhyMask + SIZE_4KB) {
174 return PageFaultContext->OriginalHandler;
175 }
176 PFAddress &= PhyMask;
177
178 PageTable = (UINT64*)(UINTN)(AsmReadCr3 () & PhyMask);
179
180 PTIndex = BitFieldRead64 (PFAddress, 39, 47);
181 // PML4E
182 if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
183 AcquirePage (PageFaultContext, &PageTable[PTIndex]);
184 }
185 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~AddressEncMask & PhyMask);
186 PTIndex = BitFieldRead64 (PFAddress, 30, 38);
187 // PDPTE
188 if (PageFaultContext->Page1GSupport) {
189 PageTable[PTIndex] = ((PFAddress | AddressEncMask) & ~((1ull << 30) - 1)) | IA32_PG_P | IA32_PG_RW | IA32_PG_PS;
190 } else {
191 if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
192 AcquirePage (PageFaultContext, &PageTable[PTIndex]);
193 }
194 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~AddressEncMask & PhyMask);
195 PTIndex = BitFieldRead64 (PFAddress, 21, 29);
196 // PD
197 PageTable[PTIndex] = ((PFAddress | AddressEncMask) & ~((1ull << 21) - 1)) | IA32_PG_P | IA32_PG_RW | IA32_PG_PS;
198 }
199
200 return NULL;
201 }
202
203
/**
  The X64 entrypoint is used to process capsule in long mode then
  return to 32-bit protected mode.

  Sequence: save the caller's IA32 IDT, build a fresh X64 IDT with default
  exception handlers plus the >4G on-demand page fault hook, run
  CapsuleDataCoalesce, then restore the IA32 IDT and drop back to 32-bit
  protected mode via AsmDisablePaging64 (which does not return here).

  @param EntrypointContext  Pointer to the context of long mode.
  @param ReturnContext      Pointer to the context of 32-bit protected mode.

  @retval This function should never return actually.

**/
EFI_STATUS
EFIAPI
_ModuleEntryPoint (
  SWITCH_32_TO_64_CONTEXT       *EntrypointContext,
  SWITCH_64_TO_32_CONTEXT       *ReturnContext
)
{
  EFI_STATUS                    Status;
  IA32_DESCRIPTOR               Ia32Idtr;
  IA32_DESCRIPTOR               X64Idtr;
  PAGE_FAULT_IDT_TABLE          PageFaultIdtTable;
  IA32_IDT_GATE_DESCRIPTOR      *IdtEntry;

  //
  // Save the IA32 IDT Descriptor so it can be restored before switching back.
  //
  AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

  //
  // Setup X64 IDT table. PageFaultIdtTable lives on the stack; its context
  // member sits directly below the IDT entries so PageFaultHandler() can
  // find it from the IDTR base.
  //
  ZeroMem (PageFaultIdtTable.IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * EXCEPTION_VECTOR_NUMBER);
  X64Idtr.Base = (UINTN) PageFaultIdtTable.IdtEntryTable;
  X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * EXCEPTION_VECTOR_NUMBER - 1);
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);

  //
  // Setup the default CPU exception handlers
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);

  //
  // Hook page fault handler (IDT vector 14) to handle >4G request.
  // Page1GSupport and AddressEncMask are passed in from the IA32 side.
  //
  PageFaultIdtTable.PageFaultContext.Page1GSupport = EntrypointContext->Page1GSupport;
  PageFaultIdtTable.PageFaultContext.AddressEncMask = EntrypointContext->AddressEncMask;
  IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) (X64Idtr.Base + (14 * sizeof (IA32_IDT_GATE_DESCRIPTOR)));
  HookPageFaultHandler (IdtEntry, &(PageFaultIdtTable.PageFaultContext));

  //
  // Initialize Debug Agent to support source level debug
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *) &Ia32Idtr, NULL);

  //
  // Call CapsuleDataCoalesce to process capsule. The 64-bit context fields
  // are narrowed through UINTN once we are known to be in long mode.
  //
  Status = CapsuleDataCoalesce (
             NULL,
             (EFI_PHYSICAL_ADDRESS *) (UINTN) EntrypointContext->BlockListAddr,
             (MEMORY_RESOURCE_DESCRIPTOR *) (UINTN) EntrypointContext->MemoryResource,
             (VOID **) (UINTN) EntrypointContext->MemoryBase64Ptr,
             (UINTN *) (UINTN) EntrypointContext->MemorySize64Ptr
             );

  // Propagate the coalesce result to the 32-bit caller via the return context.
  ReturnContext->ReturnStatus = Status;

  DEBUG ((
    DEBUG_INFO,
    "%a() Stack Base: 0x%lx, Stack Size: 0x%lx\n",
    __FUNCTION__,
    EntrypointContext->StackBufferBase,
    EntrypointContext->StackBufferLength
    ));

  //
  // Disable interrupt of Debug timer, since the new IDT table cannot work in long mode
  //
  SaveAndSetDebugTimerInterrupt (FALSE);
  //
  // Restore IA32 IDT table
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);

  //
  // Finish to coalesce capsule, and return to 32-bit mode: transfers control
  // to ReturnEntryPoint on the caller-provided stack; does not return.
  //
  AsmDisablePaging64 (
    ReturnContext->ReturnCs,
    (UINT32) ReturnContext->ReturnEntryPoint,
    (UINT32) (UINTN) EntrypointContext,
    (UINT32) (UINTN) ReturnContext,
    (UINT32) (EntrypointContext->StackBufferBase + EntrypointContext->StackBufferLength)
    );

  //
  // Should never be here.
  //
  ASSERT (FALSE);
  return EFI_SUCCESS;
}
305 }