]>
Commit | Line | Data |
---|---|---|
1 | /** @file\r | |
2 | Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.\r | |
3 | \r | |
4 | Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r | |
5 | Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r | |
6 | \r | |
7 | SPDX-License-Identifier: BSD-2-Clause-Patent\r | |
8 | \r | |
9 | **/\r | |
10 | \r | |
11 | #include "PiSmmCpuDxeSmm.h"\r | |
12 | \r | |
//
// SMM CPU Private Data structure that contains SMM Configuration Protocol
// along its supporting fields.
//
// Several members are fixed up at runtime in PiCpuSmmEntry() once the
// number of processors is known (ProcessorInfo, CpuSaveStateSize, etc.).
//
SMM_CPU_PRIVATE_DATA  mSmmCpuPrivateData = {
  SMM_CPU_PRIVATE_DATA_SIGNATURE,               // Signature
  NULL,                                         // SmmCpuHandle
  NULL,                                         // Pointer to ProcessorInfo array
  NULL,                                         // Pointer to Operation array
  NULL,                                         // Pointer to CpuSaveStateSize array
  NULL,                                         // Pointer to CpuSaveState array
  {
    { 0 }
  },                                            // SmmReservedSmramRegion
  {
    SmmStartupThisAp,                           // SmmCoreEntryContext.SmmStartupThisAp
    0,                                          // SmmCoreEntryContext.CurrentlyExecutingCpu
    0,                                          // SmmCoreEntryContext.NumberOfCpus
    NULL,                                       // SmmCoreEntryContext.CpuSaveStateSize
    NULL                                        // SmmCoreEntryContext.CpuSaveState
  },
  NULL,                                         // SmmCoreEntry
  {
    mSmmCpuPrivateData.SmmReservedSmramRegion,  // SmmConfiguration.SmramReservedRegions
    RegisterSmmEntry                            // SmmConfiguration.RegisterSmmEntry
  },
  NULL,                                         // pointer to Ap Wrapper Func array
  { NULL, NULL },                               // List_Entry for Tokens.
};
42 | \r | |
//
// Hot-plug bookkeeping shared with the SMM CPU Features library; the APIC ID
// and SMBASE arrays are allocated in PiCpuSmmEntry() sized to the maximum
// supported processor count.
//
CPU_HOT_PLUG_DATA  mCpuHotPlugData = {
  CPU_HOT_PLUG_DATA_REVISION_1,                 // Revision
  0,                                            // Array Length of SmBase and APIC ID
  NULL,                                         // Pointer to APIC ID array
  NULL,                                         // Pointer to SMBASE array
  0,                                            // Reserved
  0,                                            // SmrrBase
  0                                             // SmrrSize
};
52 | \r | |
//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

//
// SMM Relocation variables
//
// mRebased[i] is set by the relocation SMI handler (via SemaphoreHook) once
// CPU i has completed its SMBASE relocation; SmmRelocateBases() busy-waits
// on these flags.  mIsBsp tells SmmInitHandler() whether the processor
// currently servicing the relocation SMI is the BSP.
//
volatile BOOLEAN  *mRebased;
volatile BOOLEAN  mIsBsp;

///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE  mSmmCpuHandle = NULL;

///
/// SMM CPU Protocol instance
///
EFI_SMM_CPU_PROTOCOL  mSmmCpu = {
  SmmReadSaveState,
  SmmWriteSaveState
};

///
/// SMM Memory Attribute Protocol instance
///
EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  mSmmMemoryAttribute = {
  EdkiiSmmGetMemoryAttributes,
  EdkiiSmmSetMemoryAttributes,
  EdkiiSmmClearMemoryAttributes
};

//
// Table of externally registered interrupt handlers, one slot per
// exception vector.
//
EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

//
// SMM stack information
//
UINTN  mSmmStackArrayBase;
UINTN  mSmmStackArrayEnd;
UINTN  mSmmStackSize;

//
// Shadow-stack size and CET capability flag; mCetSupported is cleared in
// PiCpuSmmEntry() when CPUID reports no CET_SS support or the policy PCD
// disables control-flow enforcement.
//
UINTN    mSmmShadowStackSize;
BOOLEAN  mCetSupported = TRUE;

//
// Maximum supported and currently present logical processor counts.
//
UINTN  mMaxNumberOfCpus = 1;
UINTN  mNumberOfCpus    = 1;

//
// SMM ready to lock flag
//
BOOLEAN  mSmmReadyToLock = FALSE;

//
// Global used to cache PCD for SMM Code Access Check enable
//
BOOLEAN  mSmmCodeAccessCheckEnable = FALSE;

//
// Global copy of the PcdPteMemoryEncryptionAddressOrMask
//
UINT64  mAddressEncMask = 0;

//
// Spin lock used to serialize setting of SMM Code Access Check feature
//
SPIN_LOCK  *mConfigSmmCodeAccessCheckLock = NULL;

//
// Saved SMM ranges information
//
EFI_SMRAM_DESCRIPTOR  *mSmmCpuSmramRanges;
UINTN                 mSmmCpuSmramRangeCount;

//
// Width, in bits, of the physical address space as reported by CPUID.
//
UINT8  mPhysicalAddressBits;

//
// Control register contents saved for SMM S3 resume state initialization.
//
UINT32  mSmmCr0;
UINT32  mSmmCr4;
/**
  Initialize IDT to setup exception handlers for SMM.

  Allocates a fresh, page-aligned IDT for SMM, then temporarily loads it so
  the CPU exception library can populate the entries; the DXE IDT and the
  interrupt enable state are restored before returning.  The populated SMM
  IDT remains described by gcSmiIdtr for later use by the SMI entry code.

**/
VOID
InitializeSmmIdt (
  VOID
  )
{
  EFI_STATUS       Status;
  BOOLEAN          InterruptState;
  IA32_DESCRIPTOR  DxeIdtr;

  //
  // There are 32 (not 255) entries in it since only processor
  // generated exceptions will be handled.
  //
  gcSmiIdtr.Limit = (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
  //
  // Allocate page aligned IDT, because it might be set as read only.
  //
  gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES (gcSmiIdtr.Limit + 1));
  ASSERT (gcSmiIdtr.Base != 0);
  ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);

  //
  // Disable Interrupt and save DXE IDT table
  //
  InterruptState = SaveAndDisableInterrupts ();
  AsmReadIdtr (&DxeIdtr);
  //
  // Load SMM temporary IDT table
  //
  AsmWriteIdtr (&gcSmiIdtr);
  //
  // Setup SMM default exception handlers, SMM IDT table
  // will be updated and saved in gcSmiIdtr
  //
  Status = InitializeCpuExceptionHandlers (NULL);
  ASSERT_EFI_ERROR (Status);
  //
  // Restore DXE IDT table and CPU interrupt
  //
  AsmWriteIdtr ((IA32_DESCRIPTOR *)&DxeIdtr);
  SetInterruptState (InterruptState);
}
181 | \r | |
182 | /**\r | |
183 | Search module name by input IP address and output it.\r | |
184 | \r | |
185 | @param CallerIpAddress Caller instruction pointer.\r | |
186 | \r | |
187 | **/\r | |
188 | VOID\r | |
189 | DumpModuleInfoByIp (\r | |
190 | IN UINTN CallerIpAddress\r | |
191 | )\r | |
192 | {\r | |
193 | UINTN Pe32Data;\r | |
194 | VOID *PdbPointer;\r | |
195 | \r | |
196 | //\r | |
197 | // Find Image Base\r | |
198 | //\r | |
199 | Pe32Data = PeCoffSearchImageBase (CallerIpAddress);\r | |
200 | if (Pe32Data != 0) {\r | |
201 | DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *)CallerIpAddress));\r | |
202 | PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *)Pe32Data);\r | |
203 | if (PdbPointer != NULL) {\r | |
204 | DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));\r | |
205 | }\r | |
206 | }\r | |
207 | }\r | |
208 | \r | |
209 | /**\r | |
210 | Read information from the CPU save state.\r | |
211 | \r | |
212 | @param This EFI_SMM_CPU_PROTOCOL instance\r | |
213 | @param Width The number of bytes to read from the CPU save state.\r | |
214 | @param Register Specifies the CPU register to read form the save state.\r | |
215 | @param CpuIndex Specifies the zero-based index of the CPU save state.\r | |
216 | @param Buffer Upon return, this holds the CPU register value read from the save state.\r | |
217 | \r | |
218 | @retval EFI_SUCCESS The register was read from Save State\r | |
219 | @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r | |
220 | @retval EFI_INVALID_PARAMETER This or Buffer is NULL.\r | |
221 | \r | |
222 | **/\r | |
223 | EFI_STATUS\r | |
224 | EFIAPI\r | |
225 | SmmReadSaveState (\r | |
226 | IN CONST EFI_SMM_CPU_PROTOCOL *This,\r | |
227 | IN UINTN Width,\r | |
228 | IN EFI_SMM_SAVE_STATE_REGISTER Register,\r | |
229 | IN UINTN CpuIndex,\r | |
230 | OUT VOID *Buffer\r | |
231 | )\r | |
232 | {\r | |
233 | EFI_STATUS Status;\r | |
234 | \r | |
235 | //\r | |
236 | // Retrieve pointer to the specified CPU's SMM Save State buffer\r | |
237 | //\r | |
238 | if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r | |
239 | return EFI_INVALID_PARAMETER;\r | |
240 | }\r | |
241 | \r | |
242 | //\r | |
243 | // The SpeculationBarrier() call here is to ensure the above check for the\r | |
244 | // CpuIndex has been completed before the execution of subsequent codes.\r | |
245 | //\r | |
246 | SpeculationBarrier ();\r | |
247 | \r | |
248 | //\r | |
249 | // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID\r | |
250 | //\r | |
251 | if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r | |
252 | //\r | |
253 | // The pseudo-register only supports the 64-bit size specified by Width.\r | |
254 | //\r | |
255 | if (Width != sizeof (UINT64)) {\r | |
256 | return EFI_INVALID_PARAMETER;\r | |
257 | }\r | |
258 | \r | |
259 | //\r | |
260 | // If the processor is in SMM at the time the SMI occurred,\r | |
261 | // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.\r | |
262 | // Otherwise, EFI_NOT_FOUND is returned.\r | |
263 | //\r | |
264 | if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {\r | |
265 | *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;\r | |
266 | return EFI_SUCCESS;\r | |
267 | } else {\r | |
268 | return EFI_NOT_FOUND;\r | |
269 | }\r | |
270 | }\r | |
271 | \r | |
272 | if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r | |
273 | return EFI_INVALID_PARAMETER;\r | |
274 | }\r | |
275 | \r | |
276 | Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r | |
277 | if (Status == EFI_UNSUPPORTED) {\r | |
278 | Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);\r | |
279 | }\r | |
280 | \r | |
281 | return Status;\r | |
282 | }\r | |
283 | \r | |
284 | /**\r | |
285 | Write data to the CPU save state.\r | |
286 | \r | |
287 | @param This EFI_SMM_CPU_PROTOCOL instance\r | |
288 | @param Width The number of bytes to read from the CPU save state.\r | |
289 | @param Register Specifies the CPU register to write to the save state.\r | |
290 | @param CpuIndex Specifies the zero-based index of the CPU save state\r | |
291 | @param Buffer Upon entry, this holds the new CPU register value.\r | |
292 | \r | |
293 | @retval EFI_SUCCESS The register was written from Save State\r | |
294 | @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor\r | |
295 | @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct\r | |
296 | \r | |
297 | **/\r | |
298 | EFI_STATUS\r | |
299 | EFIAPI\r | |
300 | SmmWriteSaveState (\r | |
301 | IN CONST EFI_SMM_CPU_PROTOCOL *This,\r | |
302 | IN UINTN Width,\r | |
303 | IN EFI_SMM_SAVE_STATE_REGISTER Register,\r | |
304 | IN UINTN CpuIndex,\r | |
305 | IN CONST VOID *Buffer\r | |
306 | )\r | |
307 | {\r | |
308 | EFI_STATUS Status;\r | |
309 | \r | |
310 | //\r | |
311 | // Retrieve pointer to the specified CPU's SMM Save State buffer\r | |
312 | //\r | |
313 | if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {\r | |
314 | return EFI_INVALID_PARAMETER;\r | |
315 | }\r | |
316 | \r | |
317 | //\r | |
318 | // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored\r | |
319 | //\r | |
320 | if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {\r | |
321 | return EFI_SUCCESS;\r | |
322 | }\r | |
323 | \r | |
324 | if (!mSmmMpSyncData->CpuData[CpuIndex].Present) {\r | |
325 | return EFI_INVALID_PARAMETER;\r | |
326 | }\r | |
327 | \r | |
328 | Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r | |
329 | if (Status == EFI_UNSUPPORTED) {\r | |
330 | Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);\r | |
331 | }\r | |
332 | \r | |
333 | return Status;\r | |
334 | }\r | |
335 | \r | |
/**
  C function for SMI handler. To change all processor's SMMBase Register.

  Runs in SMM context on each processor during the first (relocation) SMI.
  Identifies the executing CPU by its local APIC ID, initializes its SMM
  features (which performs the SMBASE rebase via mCpuHotPlugData), and hooks
  the RSM return path so mRebased[Index] is set once relocation completes.

**/
VOID
EFIAPI
SmmInitHandler (
  VOID
  )
{
  UINT32  ApicId;
  UINTN   Index;

  //
  // Update SMM IDT entries' code segment and load IDT
  //
  AsmWriteIdtr (&gcSmiIdtr);
  ApicId = GetApicId ();

  ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);

  //
  // Locate the executing CPU's slot by matching the local APIC ID against
  // the ProcessorInfo table built from MP Services.
  //
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      //
      // Initialize SMM specific features on the currently executing CPU
      //
      SmmCpuFeaturesInitializeProcessor (
        Index,
        mIsBsp,
        gSmmCpuPrivate->ProcessorInfo,
        &mCpuHotPlugData
        );

      if (!mSmmS3Flag) {
        //
        // Check XD and BTS features on each processor on normal boot
        //
        CheckFeatureSupported ();
      }

      if (mIsBsp) {
        //
        // BSP rebase is already done above.
        // Initialize private data during S3 resume
        //
        InitializeMpSyncData ();
      }

      //
      // Hook return after RSM to set SMM re-based flag
      //
      SemaphoreHook (Index, &mRebased[Index]);

      return;
    }
  }

  //
  // The executing CPU's APIC ID must be present in the processor table.
  //
  ASSERT (FALSE);
}
395 | \r | |
/**
  Relocate SmmBases for each processor.

  Execute on first boot and all S3 resumes

  Temporarily installs the SmmInit template at the default SMBASE handler
  address (0x38000 region), then sends a directed SMI to every AP in turn
  and finally to the BSP; each SMI runs SmmInitHandler(), which rebases
  that processor's SMBASE.  The clobbered low-memory contents are restored
  afterwards.

**/
VOID
EFIAPI
SmmRelocateBases (
  VOID
  )
{
  UINT8                 BakBuf[BACK_BUF_SIZE];
  SMRAM_SAVE_STATE_MAP  BakBuf2;
  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;
  UINT8                 *U8Ptr;
  UINT32                ApicId;
  UINTN                 Index;
  UINTN                 BspIndex;

  //
  // Make sure the reserved size is large enough for procedure SmmInitTemplate.
  //
  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

  //
  // Patch ASM code template with current CR0, CR3, and CR4 values
  //
  mSmmCr0 = (UINT32)AsmReadCr0 ();
  PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
  mSmmCr4 = (UINT32)AsmReadCr4 ();
  // CET must be disabled in the patched CR4 image while relocation runs.
  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);

  //
  // Patch GDTR for SMM base relocation
  //
  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;
  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

  U8Ptr       = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

  //
  // Backup original contents at address 0x38000
  //
  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

  //
  // Load image for relocation
  //
  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

  //
  // Retrieve the local APIC ID of current processor
  //
  ApicId = GetApicId ();

  //
  // Relocate SM bases for all APs
  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
  //
  mIsBsp   = FALSE;
  BspIndex = (UINTN)-1;
  for (Index = 0; Index < mNumberOfCpus; Index++) {
    mRebased[Index] = FALSE;
    if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      //
      // Wait for this AP to finish its 1st SMI
      //
      while (!mRebased[Index]) {
      }
    } else {
      //
      // BSP will be Relocated later
      //
      BspIndex = Index;
    }
  }

  //
  // Relocate BSP's SMM base
  //
  ASSERT (BspIndex != (UINTN)-1);
  mIsBsp = TRUE;
  SendSmiIpi (ApicId);
  //
  // Wait for the BSP to finish its 1st SMI
  //
  while (!mRebased[BspIndex]) {
  }

  //
  // Restore contents at address 0x38000
  //
  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
}
496 | \r | |
497 | /**\r | |
498 | SMM Ready To Lock event notification handler.\r | |
499 | \r | |
500 | The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to\r | |
501 | perform additional lock actions that must be performed from SMM on the next SMI.\r | |
502 | \r | |
503 | @param[in] Protocol Points to the protocol's unique identifier.\r | |
504 | @param[in] Interface Points to the interface instance.\r | |
505 | @param[in] Handle The handle on which the interface was installed.\r | |
506 | \r | |
507 | @retval EFI_SUCCESS Notification handler runs successfully.\r | |
508 | **/\r | |
509 | EFI_STATUS\r | |
510 | EFIAPI\r | |
511 | SmmReadyToLockEventNotify (\r | |
512 | IN CONST EFI_GUID *Protocol,\r | |
513 | IN VOID *Interface,\r | |
514 | IN EFI_HANDLE Handle\r | |
515 | )\r | |
516 | {\r | |
517 | GetAcpiCpuData ();\r | |
518 | \r | |
519 | //\r | |
520 | // Cache a copy of UEFI memory map before we start profiling feature.\r | |
521 | //\r | |
522 | GetUefiMemoryMap ();\r | |
523 | \r | |
524 | //\r | |
525 | // Set SMM ready to lock flag and return\r | |
526 | //\r | |
527 | mSmmReadyToLock = TRUE;\r | |
528 | return EFI_SUCCESS;\r | |
529 | }\r | |
530 | \r | |
531 | /**\r | |
532 | The module Entry Point of the CPU SMM driver.\r | |
533 | \r | |
534 | @param ImageHandle The firmware allocated handle for the EFI image.\r | |
535 | @param SystemTable A pointer to the EFI System Table.\r | |
536 | \r | |
537 | @retval EFI_SUCCESS The entry point is executed successfully.\r | |
538 | @retval Other Some error occurs when executing this entry point.\r | |
539 | \r | |
540 | **/\r | |
541 | EFI_STATUS\r | |
542 | EFIAPI\r | |
543 | PiCpuSmmEntry (\r | |
544 | IN EFI_HANDLE ImageHandle,\r | |
545 | IN EFI_SYSTEM_TABLE *SystemTable\r | |
546 | )\r | |
547 | {\r | |
548 | EFI_STATUS Status;\r | |
549 | EFI_MP_SERVICES_PROTOCOL *MpServices;\r | |
550 | UINTN NumberOfEnabledProcessors;\r | |
551 | UINTN Index;\r | |
552 | VOID *Buffer;\r | |
553 | UINTN BufferPages;\r | |
554 | UINTN TileCodeSize;\r | |
555 | UINTN TileDataSize;\r | |
556 | UINTN TileSize;\r | |
557 | UINT8 *Stacks;\r | |
558 | VOID *Registration;\r | |
559 | UINT32 RegEax;\r | |
560 | UINT32 RegEbx;\r | |
561 | UINT32 RegEcx;\r | |
562 | UINT32 RegEdx;\r | |
563 | UINTN FamilyId;\r | |
564 | UINTN ModelId;\r | |
565 | UINT32 Cr3;\r | |
566 | \r | |
567 | //\r | |
568 | // Initialize address fixup\r | |
569 | //\r | |
570 | PiSmmCpuSmmInitFixupAddress ();\r | |
571 | PiSmmCpuSmiEntryFixupAddress ();\r | |
572 | \r | |
573 | //\r | |
574 | // Initialize Debug Agent to support source level debug in SMM code\r | |
575 | //\r | |
576 | InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);\r | |
577 | \r | |
578 | //\r | |
579 | // Report the start of CPU SMM initialization.\r | |
580 | //\r | |
581 | REPORT_STATUS_CODE (\r | |
582 | EFI_PROGRESS_CODE,\r | |
583 | EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT\r | |
584 | );\r | |
585 | \r | |
586 | //\r | |
587 | // Find out SMRR Base and SMRR Size\r | |
588 | //\r | |
589 | FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);\r | |
590 | \r | |
591 | //\r | |
592 | // Get MP Services Protocol\r | |
593 | //\r | |
594 | Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);\r | |
595 | ASSERT_EFI_ERROR (Status);\r | |
596 | \r | |
597 | //\r | |
598 | // Use MP Services Protocol to retrieve the number of processors and number of enabled processors\r | |
599 | //\r | |
600 | Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);\r | |
601 | ASSERT_EFI_ERROR (Status);\r | |
602 | ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));\r | |
603 | \r | |
604 | //\r | |
605 | // If support CPU hot plug, PcdCpuSmmEnableBspElection should be set to TRUE.\r | |
606 | // A constant BSP index makes no sense because it may be hot removed.\r | |
607 | //\r | |
608 | DEBUG_CODE_BEGIN ();\r | |
609 | if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r | |
610 | ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));\r | |
611 | }\r | |
612 | \r | |
613 | DEBUG_CODE_END ();\r | |
614 | \r | |
615 | //\r | |
616 | // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.\r | |
617 | //\r | |
618 | mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);\r | |
619 | DEBUG ((DEBUG_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));\r | |
620 | \r | |
621 | //\r | |
622 | // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.\r | |
623 | // Make sure AddressEncMask is contained to smallest supported address field.\r | |
624 | //\r | |
625 | mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;\r | |
626 | DEBUG ((DEBUG_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));\r | |
627 | \r | |
628 | //\r | |
629 | // If support CPU hot plug, we need to allocate resources for possibly hot-added processors\r | |
630 | //\r | |
631 | if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r | |
632 | mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);\r | |
633 | } else {\r | |
634 | mMaxNumberOfCpus = mNumberOfCpus;\r | |
635 | }\r | |
636 | \r | |
637 | gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;\r | |
638 | \r | |
639 | //\r | |
640 | // The CPU save state and code for the SMI entry point are tiled within an SMRAM\r | |
641 | // allocated buffer. The minimum size of this buffer for a uniprocessor system\r | |
642 | // is 32 KB, because the entry point is SMBASE + 32KB, and CPU save state area\r | |
643 | // just below SMBASE + 64KB. If more than one CPU is present in the platform,\r | |
644 | // then the SMI entry point and the CPU save state areas can be tiles to minimize\r | |
645 | // the total amount SMRAM required for all the CPUs. The tile size can be computed\r | |
646 | // by adding the // CPU save state size, any extra CPU specific context, and\r | |
647 | // the size of code that must be placed at the SMI entry point to transfer\r | |
648 | // control to a C function in the native SMM execution mode. This size is\r | |
649 | // rounded up to the nearest power of 2 to give the tile size for a each CPU.\r | |
650 | // The total amount of memory required is the maximum number of CPUs that\r | |
651 | // platform supports times the tile size. The picture below shows the tiling,\r | |
652 | // where m is the number of tiles that fit in 32KB.\r | |
653 | //\r | |
654 | // +-----------------------------+ <-- 2^n offset from Base of allocated buffer\r | |
655 | // | CPU m+1 Save State |\r | |
656 | // +-----------------------------+\r | |
657 | // | CPU m+1 Extra Data |\r | |
658 | // +-----------------------------+\r | |
659 | // | Padding |\r | |
660 | // +-----------------------------+\r | |
661 | // | CPU 2m SMI Entry |\r | |
662 | // +#############################+ <-- Base of allocated buffer + 64 KB\r | |
663 | // | CPU m-1 Save State |\r | |
664 | // +-----------------------------+\r | |
665 | // | CPU m-1 Extra Data |\r | |
666 | // +-----------------------------+\r | |
667 | // | Padding |\r | |
668 | // +-----------------------------+\r | |
669 | // | CPU 2m-1 SMI Entry |\r | |
670 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
671 | // | . . . . . . . . . . . . |\r | |
672 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
673 | // | CPU 2 Save State |\r | |
674 | // +-----------------------------+\r | |
675 | // | CPU 2 Extra Data |\r | |
676 | // +-----------------------------+\r | |
677 | // | Padding |\r | |
678 | // +-----------------------------+\r | |
679 | // | CPU m+1 SMI Entry |\r | |
680 | // +=============================+ <-- Base of allocated buffer + 32 KB\r | |
681 | // | CPU 1 Save State |\r | |
682 | // +-----------------------------+\r | |
683 | // | CPU 1 Extra Data |\r | |
684 | // +-----------------------------+\r | |
685 | // | Padding |\r | |
686 | // +-----------------------------+\r | |
687 | // | CPU m SMI Entry |\r | |
688 | // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB\r | |
689 | // | CPU 0 Save State |\r | |
690 | // +-----------------------------+\r | |
691 | // | CPU 0 Extra Data |\r | |
692 | // +-----------------------------+\r | |
693 | // | Padding |\r | |
694 | // +-----------------------------+\r | |
695 | // | CPU m-1 SMI Entry |\r | |
696 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
697 | // | . . . . . . . . . . . . |\r | |
698 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
699 | // | Padding |\r | |
700 | // +-----------------------------+\r | |
701 | // | CPU 1 SMI Entry |\r | |
702 | // +=============================+ <-- 2^n offset from Base of allocated buffer\r | |
703 | // | Padding |\r | |
704 | // +-----------------------------+\r | |
705 | // | CPU 0 SMI Entry |\r | |
706 | // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB\r | |
707 | //\r | |
708 | \r | |
709 | //\r | |
710 | // Retrieve CPU Family\r | |
711 | //\r | |
712 | AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);\r | |
713 | FamilyId = (RegEax >> 8) & 0xf;\r | |
714 | ModelId = (RegEax >> 4) & 0xf;\r | |
715 | if ((FamilyId == 0x06) || (FamilyId == 0x0f)) {\r | |
716 | ModelId = ModelId | ((RegEax >> 12) & 0xf0);\r | |
717 | }\r | |
718 | \r | |
719 | RegEdx = 0;\r | |
720 | AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r | |
721 | if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r | |
722 | AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r | |
723 | }\r | |
724 | \r | |
725 | //\r | |
726 | // Determine the mode of the CPU at the time an SMI occurs\r | |
727 | // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r | |
728 | // Volume 3C, Section 34.4.1.1\r | |
729 | //\r | |
730 | mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;\r | |
731 | if ((RegEdx & BIT29) != 0) {\r | |
732 | mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r | |
733 | }\r | |
734 | \r | |
735 | if (FamilyId == 0x06) {\r | |
736 | if ((ModelId == 0x17) || (ModelId == 0x0f) || (ModelId == 0x1c)) {\r | |
737 | mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;\r | |
738 | }\r | |
739 | }\r | |
740 | \r | |
741 | DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));\r | |
742 | if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {\r | |
743 | AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r | |
744 | if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {\r | |
745 | AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);\r | |
746 | DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));\r | |
747 | DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));\r | |
748 | DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));\r | |
749 | if ((RegEcx & CPUID_CET_SS) == 0) {\r | |
750 | mCetSupported = FALSE;\r | |
751 | PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r | |
752 | }\r | |
753 | \r | |
754 | if (mCetSupported) {\r | |
755 | AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);\r | |
756 | DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));\r | |
757 | AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);\r | |
758 | DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r | |
759 | AsmCpuidEx (CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);\r | |
760 | DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));\r | |
761 | }\r | |
762 | } else {\r | |
763 | mCetSupported = FALSE;\r | |
764 | PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r | |
765 | }\r | |
766 | } else {\r | |
767 | mCetSupported = FALSE;\r | |
768 | PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);\r | |
769 | }\r | |
770 | \r | |
771 | //\r | |
772 | // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU\r | |
773 | // specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.\r | |
774 | // This size is rounded up to nearest power of 2.\r | |
775 | //\r | |
776 | TileCodeSize = GetSmiHandlerSize ();\r | |
777 | TileCodeSize = ALIGN_VALUE (TileCodeSize, SIZE_4KB);\r | |
778 | TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);\r | |
779 | TileDataSize = ALIGN_VALUE (TileDataSize, SIZE_4KB);\r | |
780 | TileSize = TileDataSize + TileCodeSize - 1;\r | |
781 | TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);\r | |
782 | DEBUG ((DEBUG_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));\r | |
783 | \r | |
784 | //\r | |
785 | // If the TileSize is larger than space available for the SMI Handler of\r | |
786 | // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save\r | |
787 | // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then\r | |
788 | // the SMI Handler size must be reduced or the size of the extra CPU specific\r | |
789 | // context must be reduced.\r | |
790 | //\r | |
791 | ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));\r | |
792 | \r | |
793 | //\r | |
794 | // Allocate buffer for all of the tiles.\r | |
795 | //\r | |
796 | // Intel(R) 64 and IA-32 Architectures Software Developer's Manual\r | |
797 | // Volume 3C, Section 34.11 SMBASE Relocation\r | |
798 | // For Pentium and Intel486 processors, the SMBASE values must be\r | |
799 | // aligned on a 32-KByte boundary or the processor will enter shutdown\r | |
800 | // state during the execution of a RSM instruction.\r | |
801 | //\r | |
802 | // Intel486 processors: FamilyId is 4\r | |
803 | // Pentium processors : FamilyId is 5\r | |
804 | //\r | |
805 | BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));\r | |
806 | if ((FamilyId == 4) || (FamilyId == 5)) {\r | |
807 | Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);\r | |
808 | } else {\r | |
809 | Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);\r | |
810 | }\r | |
811 | \r | |
812 | ASSERT (Buffer != NULL);\r | |
813 | DEBUG ((DEBUG_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));\r | |
814 | \r | |
815 | //\r | |
816 | // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.\r | |
817 | //\r | |
818 | gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);\r | |
819 | ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);\r | |
820 | \r | |
821 | gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);\r | |
822 | ASSERT (gSmmCpuPrivate->Operation != NULL);\r | |
823 | \r | |
824 | gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r | |
825 | ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);\r | |
826 | \r | |
827 | gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);\r | |
828 | ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);\r | |
829 | \r | |
830 | mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;\r | |
831 | mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;\r | |
832 | \r | |
833 | //\r | |
834 | // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.\r | |
835 | //\r | |
836 | mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);\r | |
837 | ASSERT (mCpuHotPlugData.ApicId != NULL);\r | |
838 | mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);\r | |
839 | ASSERT (mCpuHotPlugData.SmBase != NULL);\r | |
840 | mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;\r | |
841 | \r | |
842 | //\r | |
843 | // Retrieve APIC ID of each enabled processor from the MP Services protocol.\r | |
844 | // Also compute the SMBASE address, CPU Save State address, and CPU Save state\r | |
845 | // size for each CPU in the platform\r | |
846 | //\r | |
847 | for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r | |
848 | mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;\r | |
849 | gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);\r | |
850 | gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);\r | |
851 | gSmmCpuPrivate->Operation[Index] = SmmCpuNone;\r | |
852 | \r | |
853 | if (Index < mNumberOfCpus) {\r | |
854 | Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);\r | |
855 | ASSERT_EFI_ERROR (Status);\r | |
856 | mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;\r | |
857 | \r | |
858 | DEBUG ((\r | |
859 | DEBUG_INFO,\r | |
860 | "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",\r | |
861 | Index,\r | |
862 | (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,\r | |
863 | mCpuHotPlugData.SmBase[Index],\r | |
864 | gSmmCpuPrivate->CpuSaveState[Index],\r | |
865 | gSmmCpuPrivate->CpuSaveStateSize[Index]\r | |
866 | ));\r | |
867 | } else {\r | |
868 | gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;\r | |
869 | mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;\r | |
870 | }\r | |
871 | }\r | |
872 | \r | |
873 | //\r | |
874 | // Allocate SMI stacks for all processors.\r | |
875 | //\r | |
876 | mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));\r | |
877 | if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r | |
878 | //\r | |
879 | // SMM Stack Guard Enabled\r | |
880 | // 2 more pages is allocated for each processor, one is guard page and the other is known good stack.\r | |
881 | //\r | |
882 | // +--------------------------------------------------+-----+--------------------------------------------------+\r | |
883 | // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |\r | |
884 | // +--------------------------------------------------+-----+--------------------------------------------------+\r | |
885 | // | 4K | 4K PcdCpuSmmStackSize| | 4K | 4K PcdCpuSmmStackSize|\r | |
886 | // |<---------------- mSmmStackSize ----------------->| |<---------------- mSmmStackSize ----------------->|\r | |
887 | // | | | |\r | |
888 | // |<------------------ Processor 0 ----------------->| |<------------------ Processor n ----------------->|\r | |
889 | //\r | |
890 | mSmmStackSize += EFI_PAGES_TO_SIZE (2);\r | |
891 | }\r | |
892 | \r | |
893 | mSmmShadowStackSize = 0;\r | |
894 | if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r | |
895 | mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));\r | |
896 | \r | |
897 | if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r | |
898 | //\r | |
899 | // SMM Stack Guard Enabled\r | |
900 | // Append Shadow Stack after normal stack\r | |
901 | // 2 more pages is allocated for each processor, one is guard page and the other is known good shadow stack.\r | |
902 | //\r | |
903 | // |= Stacks\r | |
904 | // +--------------------------------------------------+---------------------------------------------------------------+\r | |
905 | // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |\r | |
906 | // +--------------------------------------------------+---------------------------------------------------------------+\r | |
907 | // | 4K | 4K |PcdCpuSmmStackSize| 4K | 4K |PcdCpuSmmShadowStackSize|\r | |
908 | // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|\r | |
909 | // | |\r | |
910 | // |<-------------------------------------------- Processor N ------------------------------------------------------->|\r | |
911 | //\r | |
912 | mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);\r | |
913 | } else {\r | |
914 | //\r | |
915 | // SMM Stack Guard Disabled (Known Good Stack is still required for potential stack switch.)\r | |
916 | // Append Shadow Stack after normal stack with 1 more page as known good shadow stack.\r | |
917 | // 1 more pages is allocated for each processor, it is known good stack.\r | |
918 | //\r | |
919 | //\r | |
920 | // |= Stacks\r | |
921 | // +-------------------------------------+--------------------------------------------------+\r | |
922 | // | Known Good Stack | SMM Stack | Known Good Shadow Stack | SMM Shadow Stack |\r | |
923 | // +-------------------------------------+--------------------------------------------------+\r | |
924 | // | 4K |PcdCpuSmmStackSize| 4K |PcdCpuSmmShadowStackSize|\r | |
925 | // |<---------- mSmmStackSize ---------->|<--------------- mSmmShadowStackSize ------------>|\r | |
926 | // | |\r | |
927 | // |<-------------------------------- Processor N ----------------------------------------->|\r | |
928 | //\r | |
929 | mSmmShadowStackSize += EFI_PAGES_TO_SIZE (1);\r | |
930 | mSmmStackSize += EFI_PAGES_TO_SIZE (1);\r | |
931 | }\r | |
932 | }\r | |
933 | \r | |
934 | Stacks = (UINT8 *)AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));\r | |
935 | ASSERT (Stacks != NULL);\r | |
936 | mSmmStackArrayBase = (UINTN)Stacks;\r | |
937 | mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;\r | |
938 | \r | |
939 | DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));\r | |
940 | DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));\r | |
941 | DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));\r | |
942 | if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r | |
943 | DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));\r | |
944 | }\r | |
945 | \r | |
946 | //\r | |
947 | // Set SMI stack for SMM base relocation\r | |
948 | //\r | |
949 | PatchInstructionX86 (\r | |
950 | gPatchSmmInitStack,\r | |
951 | (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),\r | |
952 | sizeof (UINTN)\r | |
953 | );\r | |
954 | \r | |
955 | //\r | |
956 | // Initialize IDT\r | |
957 | //\r | |
958 | InitializeSmmIdt ();\r | |
959 | \r | |
960 | //\r | |
961 | // Relocate SMM Base addresses to the ones allocated from SMRAM\r | |
962 | //\r | |
963 | mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);\r | |
964 | ASSERT (mRebased != NULL);\r | |
965 | SmmRelocateBases ();\r | |
966 | \r | |
967 | //\r | |
968 | // Call hook for BSP to perform extra actions in normal mode after all\r | |
969 | // SMM base addresses have been relocated on all CPUs\r | |
970 | //\r | |
971 | SmmCpuFeaturesSmmRelocationComplete ();\r | |
972 | \r | |
973 | DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));\r | |
974 | \r | |
975 | //\r | |
976 | // SMM Time initialization\r | |
977 | //\r | |
978 | InitializeSmmTimer ();\r | |
979 | \r | |
980 | //\r | |
981 | // Initialize MP globals\r | |
982 | //\r | |
983 | Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);\r | |
984 | \r | |
985 | if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {\r | |
986 | for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r | |
987 | SetShadowStack (\r | |
988 | Cr3,\r | |
989 | (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,\r | |
990 | mSmmShadowStackSize\r | |
991 | );\r | |
992 | if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r | |
993 | SetNotPresentPage (\r | |
994 | Cr3,\r | |
995 | (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE (1) + (mSmmStackSize + mSmmShadowStackSize) * Index,\r | |
996 | EFI_PAGES_TO_SIZE (1)\r | |
997 | );\r | |
998 | }\r | |
999 | }\r | |
1000 | }\r | |
1001 | \r | |
1002 | //\r | |
1003 | // Fill in SMM Reserved Regions\r | |
1004 | //\r | |
1005 | gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;\r | |
1006 | gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;\r | |
1007 | \r | |
1008 | //\r | |
1009 | // Install the SMM Configuration Protocol onto a new handle on the handle database.\r | |
1010 | // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer\r | |
1011 | // to an SMRAM address will be present in the handle database\r | |
1012 | //\r | |
1013 | Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (\r | |
1014 | &gSmmCpuPrivate->SmmCpuHandle,\r | |
1015 | &gEfiSmmConfigurationProtocolGuid,\r | |
1016 | &gSmmCpuPrivate->SmmConfiguration,\r | |
1017 | NULL\r | |
1018 | );\r | |
1019 | ASSERT_EFI_ERROR (Status);\r | |
1020 | \r | |
1021 | //\r | |
1022 | // Install the SMM CPU Protocol into SMM protocol database\r | |
1023 | //\r | |
1024 | Status = gSmst->SmmInstallProtocolInterface (\r | |
1025 | &mSmmCpuHandle,\r | |
1026 | &gEfiSmmCpuProtocolGuid,\r | |
1027 | EFI_NATIVE_INTERFACE,\r | |
1028 | &mSmmCpu\r | |
1029 | );\r | |
1030 | ASSERT_EFI_ERROR (Status);\r | |
1031 | \r | |
1032 | //\r | |
1033 | // Install the SMM Memory Attribute Protocol into SMM protocol database\r | |
1034 | //\r | |
1035 | Status = gSmst->SmmInstallProtocolInterface (\r | |
1036 | &mSmmCpuHandle,\r | |
1037 | &gEdkiiSmmMemoryAttributeProtocolGuid,\r | |
1038 | EFI_NATIVE_INTERFACE,\r | |
1039 | &mSmmMemoryAttribute\r | |
1040 | );\r | |
1041 | ASSERT_EFI_ERROR (Status);\r | |
1042 | \r | |
1043 | //\r | |
1044 | // Initialize global buffer for MM MP.\r | |
1045 | //\r | |
1046 | InitializeDataForMmMp ();\r | |
1047 | \r | |
1048 | //\r | |
1049 | // Install the SMM Mp Protocol into SMM protocol database\r | |
1050 | //\r | |
1051 | Status = gSmst->SmmInstallProtocolInterface (\r | |
1052 | &mSmmCpuHandle,\r | |
1053 | &gEfiMmMpProtocolGuid,\r | |
1054 | EFI_NATIVE_INTERFACE,\r | |
1055 | &mSmmMp\r | |
1056 | );\r | |
1057 | ASSERT_EFI_ERROR (Status);\r | |
1058 | \r | |
1059 | //\r | |
1060 | // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.\r | |
1061 | //\r | |
1062 | if (FeaturePcdGet (PcdCpuHotPlugSupport)) {\r | |
1063 | Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);\r | |
1064 | ASSERT_EFI_ERROR (Status);\r | |
1065 | }\r | |
1066 | \r | |
1067 | //\r | |
1068 | // Initialize SMM CPU Services Support\r | |
1069 | //\r | |
1070 | Status = InitializeSmmCpuServices (mSmmCpuHandle);\r | |
1071 | ASSERT_EFI_ERROR (Status);\r | |
1072 | \r | |
1073 | //\r | |
1074 | // register SMM Ready To Lock Protocol notification\r | |
1075 | //\r | |
1076 | Status = gSmst->SmmRegisterProtocolNotify (\r | |
1077 | &gEfiSmmReadyToLockProtocolGuid,\r | |
1078 | SmmReadyToLockEventNotify,\r | |
1079 | &Registration\r | |
1080 | );\r | |
1081 | ASSERT_EFI_ERROR (Status);\r | |
1082 | \r | |
1083 | //\r | |
1084 | // Initialize SMM Profile feature\r | |
1085 | //\r | |
1086 | InitSmmProfile (Cr3);\r | |
1087 | \r | |
1088 | GetAcpiS3EnableFlag ();\r | |
1089 | InitSmmS3ResumeState (Cr3);\r | |
1090 | \r | |
1091 | DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));\r | |
1092 | \r | |
1093 | return EFI_SUCCESS;\r | |
1094 | }\r | |
1095 | \r | |
/**

  Find out SMRAM information including SMRR base and SMRR size.

  Queries the SMM Access2 Protocol for the platform SMRAM map, selects the
  largest usable SMRAM range between 1MB and SMRR_MAX_ADDRESS, then repeatedly
  absorbs any physically adjacent SMRAM ranges so the returned region covers
  as much contiguous SMRAM as possible. The full descriptor array is cached in
  mSmmCpuSmramRanges / mSmmCpuSmramRangeCount for later use by this module.

  @param SmrrBase  SMRR base
  @param SmrrSize  SMRR size

**/
VOID
FindSmramInfo (
  OUT UINT32  *SmrrBase,
  OUT UINT32  *SmrrSize
  )
{
  EFI_STATUS                Status;
  UINTN                     Size;
  EFI_SMM_ACCESS2_PROTOCOL  *SmmAccess;
  EFI_SMRAM_DESCRIPTOR      *CurrentSmramRange;
  UINTN                     Index;
  UINT64                    MaxSize;
  BOOLEAN                   Found;

  //
  // Get SMM Access Protocol
  //
  Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
  ASSERT_EFI_ERROR (Status);

  //
  // Get SMRAM information. The first call with Size == 0 is expected to fail
  // with EFI_BUFFER_TOO_SMALL and report the required buffer size.
  //
  Size   = 0;
  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
  ASSERT (Status == EFI_BUFFER_TOO_SMALL);

  mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
  ASSERT (mSmmCpuSmramRanges != NULL);

  Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
  ASSERT_EFI_ERROR (Status);

  mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);

  //
  // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size
  //
  CurrentSmramRange = NULL;
  for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
    //
    // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
    //
    if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
      continue;
    }

    if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
      if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
        if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
          MaxSize           = mSmmCpuSmramRanges[Index].PhysicalSize;
          CurrentSmramRange = &mSmmCpuSmramRanges[Index];
        }
      }
    }
  }

  ASSERT (CurrentSmramRange != NULL);

  *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
  *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;

  //
  // Grow the selected region by merging any SMRAM range that is immediately
  // below or immediately above it. Repeat full passes until no further merge
  // happens, so chains of adjacent ranges all coalesce into one region.
  //
  do {
    Found = FALSE;
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if ((mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase) &&
          (*SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)))
      {
        //
        // Range ends exactly at the current base: extend the region downward.
        //
        *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      } else if (((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart) && (mSmmCpuSmramRanges[Index].PhysicalSize > 0)) {
        //
        // Range starts exactly at the current end: extend the region upward.
        //
        *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
        Found     = TRUE;
      }
    }
  } while (Found);

  DEBUG ((DEBUG_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
}
1184 | \r | |
1185 | /**\r | |
1186 | Configure SMM Code Access Check feature on an AP.\r | |
1187 | SMM Feature Control MSR will be locked after configuration.\r | |
1188 | \r | |
1189 | @param[in,out] Buffer Pointer to private data buffer.\r | |
1190 | **/\r | |
1191 | VOID\r | |
1192 | EFIAPI\r | |
1193 | ConfigSmmCodeAccessCheckOnCurrentProcessor (\r | |
1194 | IN OUT VOID *Buffer\r | |
1195 | )\r | |
1196 | {\r | |
1197 | UINTN CpuIndex;\r | |
1198 | UINT64 SmmFeatureControlMsr;\r | |
1199 | UINT64 NewSmmFeatureControlMsr;\r | |
1200 | \r | |
1201 | //\r | |
1202 | // Retrieve the CPU Index from the context passed in\r | |
1203 | //\r | |
1204 | CpuIndex = *(UINTN *)Buffer;\r | |
1205 | \r | |
1206 | //\r | |
1207 | // Get the current SMM Feature Control MSR value\r | |
1208 | //\r | |
1209 | SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);\r | |
1210 | \r | |
1211 | //\r | |
1212 | // Compute the new SMM Feature Control MSR value\r | |
1213 | //\r | |
1214 | NewSmmFeatureControlMsr = SmmFeatureControlMsr;\r | |
1215 | if (mSmmCodeAccessCheckEnable) {\r | |
1216 | NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;\r | |
1217 | if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {\r | |
1218 | NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;\r | |
1219 | }\r | |
1220 | }\r | |
1221 | \r | |
1222 | //\r | |
1223 | // Only set the SMM Feature Control MSR value if the new value is different than the current value\r | |
1224 | //\r | |
1225 | if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {\r | |
1226 | SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);\r | |
1227 | }\r | |
1228 | \r | |
1229 | //\r | |
1230 | // Release the spin lock user to serialize the updates to the SMM Feature Control MSR\r | |
1231 | //\r | |
1232 | ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);\r | |
1233 | }\r | |
1234 | \r | |
/**
  Configure SMM Code Access Check feature for all processors.
  SMM Feature Control MSR will be locked after configuration.

  The BSP configures itself directly, then starts each AP in turn. A single
  spin lock serializes the per-processor MSR updates: the lock is acquired
  before each processor is dispatched and released by that processor inside
  ConfigSmmCodeAccessCheckOnCurrentProcessor(), so the BSP's re-acquire loop
  doubles as a "wait for AP done" handshake.
**/
VOID
ConfigSmmCodeAccessCheck (
  VOID
  )
{
  UINTN       Index;
  EFI_STATUS  Status;

  //
  // Check to see if the Feature Control MSR is supported on this CPU
  //
  Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
  if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Check to see if the CPU supports the SMM Code Access Check feature
  // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
  //
  if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
    mSmmCodeAccessCheckEnable = FALSE;
    return;
  }

  //
  // Initialize the lock used to serialize the MSR programming in BSP and all APs
  //
  InitializeSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Acquire Config SMM Code Access Check spin lock. The BSP will release the
  // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
  //
  AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

  //
  // Enable SMM Code Access Check feature on the BSP.
  // Index still holds the BSP's CPU index at this point.
  //
  ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);

  //
  // Enable SMM Code Access Check feature for the APs.
  //
  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
    if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
      if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
        //
        // If this processor does not exist (hot-plug slot not populated)
        //
        continue;
      }

      //
      // Acquire Config SMM Code Access Check spin lock. The AP will release the
      // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
      //
      AcquireSpinLock (mConfigSmmCodeAccessCheckLock);

      //
      // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
      // &Index is passed as the AP's context; the AP reads it before it
      // releases the lock, so it is stable for the duration of the call.
      //
      Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
      ASSERT_EFI_ERROR (Status);

      //
      // Wait for the AP to release the Config SMM Code Access Check spin lock.
      //
      while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
        CpuPause ();
      }

      //
      // Release the Config SMM Code Access Check spin lock.
      //
      ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
    }
  }
}
1319 | \r | |
1320 | /**\r | |
1321 | This API provides a way to allocate memory for page table.\r | |
1322 | \r | |
1323 | This API can be called more once to allocate memory for page tables.\r | |
1324 | \r | |
1325 | Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the\r | |
1326 | allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL\r | |
1327 | is returned. If there is not enough memory remaining to satisfy the request, then NULL is\r | |
1328 | returned.\r | |
1329 | \r | |
1330 | @param Pages The number of 4 KB pages to allocate.\r | |
1331 | \r | |
1332 | @return A pointer to the allocated buffer or NULL if allocation fails.\r | |
1333 | \r | |
1334 | **/\r | |
1335 | VOID *\r | |
1336 | AllocatePageTableMemory (\r | |
1337 | IN UINTN Pages\r | |
1338 | )\r | |
1339 | {\r | |
1340 | VOID *Buffer;\r | |
1341 | \r | |
1342 | Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);\r | |
1343 | if (Buffer != NULL) {\r | |
1344 | return Buffer;\r | |
1345 | }\r | |
1346 | \r | |
1347 | return AllocatePages (Pages);\r | |
1348 | }\r | |
1349 | \r | |
1350 | /**\r | |
1351 | Allocate pages for code.\r | |
1352 | \r | |
1353 | @param[in] Pages Number of pages to be allocated.\r | |
1354 | \r | |
1355 | @return Allocated memory.\r | |
1356 | **/\r | |
1357 | VOID *\r | |
1358 | AllocateCodePages (\r | |
1359 | IN UINTN Pages\r | |
1360 | )\r | |
1361 | {\r | |
1362 | EFI_STATUS Status;\r | |
1363 | EFI_PHYSICAL_ADDRESS Memory;\r | |
1364 | \r | |
1365 | if (Pages == 0) {\r | |
1366 | return NULL;\r | |
1367 | }\r | |
1368 | \r | |
1369 | Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);\r | |
1370 | if (EFI_ERROR (Status)) {\r | |
1371 | return NULL;\r | |
1372 | }\r | |
1373 | \r | |
1374 | return (VOID *)(UINTN)Memory;\r | |
1375 | }\r | |
1376 | \r | |
/**
  Allocate aligned pages for code.

  For alignments larger than one page, the function over-allocates by
  EFI_SIZE_TO_PAGES (Alignment) pages, picks the first aligned address inside
  the over-allocated region, and frees the unaligned head and tail so only
  the aligned span of exactly Pages pages remains allocated.

  @param[in] Pages      Number of pages to be allocated.
  @param[in] Alignment  The requested alignment of the allocation.
                        Must be a power of two.
                        If Alignment is zero, then byte alignment is used.

  @return Allocated memory, or NULL if Pages is 0 or allocation fails.
**/
VOID *
AllocateAlignedCodePages (
  IN UINTN  Pages,
  IN UINTN  Alignment
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Memory;
  UINTN                 AlignedMemory;
  UINTN                 AlignmentMask;
  UINTN                 UnalignedPages;
  UINTN                 RealPages;

  //
  // Alignment must be a power of two or zero.
  //
  ASSERT ((Alignment & (Alignment - 1)) == 0);

  if (Pages == 0) {
    return NULL;
  }

  if (Alignment > EFI_PAGE_SIZE) {
    //
    // Calculate the total number of pages since alignment is larger than page size.
    //
    AlignmentMask = Alignment - 1;
    RealPages     = Pages + EFI_SIZE_TO_PAGES (Alignment);
    //
    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
    //
    ASSERT (RealPages > Pages);

    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

    //
    // Round up to the first aligned address within the over-allocated region.
    //
    AlignedMemory  = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;
    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);
    if (UnalignedPages > 0) {
      //
      // Free first unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }

    //
    // Memory now points just past the aligned span; the remainder of the
    // over-allocation (RealPages minus the head pages minus Pages) is freed.
    //
    Memory         = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
    UnalignedPages = RealPages - Pages - UnalignedPages;
    if (UnalignedPages > 0) {
      //
      // Free last unaligned page(s).
      //
      Status = gSmst->SmmFreePages (Memory, UnalignedPages);
      ASSERT_EFI_ERROR (Status);
    }
  } else {
    //
    // Do not over-allocate pages in this case: page allocations are already
    // 4KB aligned, which satisfies any Alignment <= EFI_PAGE_SIZE.
    //
    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
    if (EFI_ERROR (Status)) {
      return NULL;
    }

    AlignedMemory = (UINTN)Memory;
  }

  return (VOID *)AlignedMemory;
}
1458 | \r | |
1459 | /**\r | |
1460 | Perform the remaining tasks.\r | |
1461 | \r | |
1462 | **/\r | |
1463 | VOID\r | |
1464 | PerformRemainingTasks (\r | |
1465 | VOID\r | |
1466 | )\r | |
1467 | {\r | |
1468 | if (mSmmReadyToLock) {\r | |
1469 | //\r | |
1470 | // Start SMM Profile feature\r | |
1471 | //\r | |
1472 | if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r | |
1473 | SmmProfileStart ();\r | |
1474 | }\r | |
1475 | \r | |
1476 | //\r | |
1477 | // Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.\r | |
1478 | //\r | |
1479 | InitPaging ();\r | |
1480 | \r | |
1481 | //\r | |
1482 | // Mark critical region to be read-only in page table\r | |
1483 | //\r | |
1484 | SetMemMapAttributes ();\r | |
1485 | \r | |
1486 | if (IsRestrictedMemoryAccess ()) {\r | |
1487 | //\r | |
1488 | // For outside SMRAM, we only map SMM communication buffer or MMIO.\r | |
1489 | //\r | |
1490 | SetUefiMemMapAttributes ();\r | |
1491 | \r | |
1492 | //\r | |
1493 | // Set page table itself to be read-only\r | |
1494 | //\r | |
1495 | SetPageTableAttributes ();\r | |
1496 | }\r | |
1497 | \r | |
1498 | //\r | |
1499 | // Configure SMM Code Access Check feature if available.\r | |
1500 | //\r | |
1501 | ConfigSmmCodeAccessCheck ();\r | |
1502 | \r | |
1503 | SmmCpuFeaturesCompleteSmmReadyToLock ();\r | |
1504 | \r | |
1505 | //\r | |
1506 | // Clean SMM ready to lock flag\r | |
1507 | //\r | |
1508 | mSmmReadyToLock = FALSE;\r | |
1509 | }\r | |
1510 | }\r | |
1511 | \r | |
1512 | /**\r | |
1513 | Perform the pre tasks.\r | |
1514 | \r | |
1515 | **/\r | |
1516 | VOID\r | |
1517 | PerformPreTasks (\r | |
1518 | VOID\r | |
1519 | )\r | |
1520 | {\r | |
1521 | RestoreSmmConfigurationInS3 ();\r | |
1522 | }\r |