]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
UefiCpuPkg/CpuExceptionHandlerLib: Code optimization to allow bigger stack
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / MpService.c
CommitLineData
529a5a86
MK
1/** @file\r
2SMM MP service implementation\r
3\r
4a68176c 4Copyright (c) 2009 - 2022, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
0acd8697 7SPDX-License-Identifier: BSD-2-Clause-Patent\r
529a5a86
MK
8\r
9**/\r
10\r
#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;           // MTRR values applied on SMM entry (captured at init; see ReplaceOSMtrrs)
UINT64                       gPhyMask;            // Physical address mask -- consumed elsewhere; not referenced in this file's visible code
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;  // Shared BSP/AP synchronization state (Counter, InsideSmm, per-CPU data)
UINTN                        mSmmMpSyncDataSize;      // Size of the buffer backing mSmmMpSyncData
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;       // Pre-allocated semaphore/lock storage
UINTN                        mSemaphoreSize;          // Stride of each semaphore within mSmmCpuSemaphores
SPIN_LOCK                    *mPFLock = NULL;         // Page-fault handler lock (used by paging code; not in this view)
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;         // Traditional vs. relaxed AP sync mode
BOOLEAN                      mMachineCheckSupported = FALSE;  // Gates LMCE MSR probing in SmmWaitForApArrival
MM_COMPLETION                mSmmStartupThisApToken;

// Shadow stack size; defined in another translation unit.
extern UINTN  mSmmShadowStackSize;
529a5a86
MK
29/**\r
30 Performs an atomic compare exchange operation to get semaphore.\r
31 The compare exchange operation must be performed using\r
32 MP safe mechanisms.\r
33\r
34 @param Sem IN: 32-bit unsigned integer\r
35 OUT: original integer - 1\r
36 @return Original integer - 1\r
37\r
38**/\r
39UINT32\r
40WaitForSemaphore (\r
053e878b 41 IN OUT volatile UINT32 *Sem\r
529a5a86
MK
42 )\r
43{\r
053e878b 44 UINT32 Value;\r
529a5a86 45\r
053e878b 46 for ( ; ;) {\r
529a5a86 47 Value = *Sem;\r
053e878b
MK
48 if ((Value != 0) &&\r
49 (InterlockedCompareExchange32 (\r
50 (UINT32 *)Sem,\r
51 Value,\r
52 Value - 1\r
53 ) == Value))\r
54 {\r
9001b750
LE
55 break;\r
56 }\r
053e878b 57\r
9001b750
LE
58 CpuPause ();\r
59 }\r
053e878b 60\r
529a5a86
MK
61 return Value - 1;\r
62}\r
63\r
529a5a86
MK
64/**\r
65 Performs an atomic compare exchange operation to release semaphore.\r
66 The compare exchange operation must be performed using\r
67 MP safe mechanisms.\r
68\r
69 @param Sem IN: 32-bit unsigned integer\r
70 OUT: original integer + 1\r
71 @return Original integer + 1\r
72\r
73**/\r
74UINT32\r
75ReleaseSemaphore (\r
053e878b 76 IN OUT volatile UINT32 *Sem\r
529a5a86
MK
77 )\r
78{\r
053e878b 79 UINT32 Value;\r
529a5a86
MK
80\r
81 do {\r
82 Value = *Sem;\r
83 } while (Value + 1 != 0 &&\r
84 InterlockedCompareExchange32 (\r
053e878b 85 (UINT32 *)Sem,\r
529a5a86
MK
86 Value,\r
87 Value + 1\r
88 ) != Value);\r
053e878b 89\r
529a5a86
MK
90 return Value + 1;\r
91}\r
92\r
93/**\r
94 Performs an atomic compare exchange operation to lock semaphore.\r
95 The compare exchange operation must be performed using\r
96 MP safe mechanisms.\r
97\r
98 @param Sem IN: 32-bit unsigned integer\r
99 OUT: -1\r
100 @return Original integer\r
101\r
102**/\r
103UINT32\r
104LockdownSemaphore (\r
053e878b 105 IN OUT volatile UINT32 *Sem\r
529a5a86
MK
106 )\r
107{\r
053e878b 108 UINT32 Value;\r
529a5a86
MK
109\r
110 do {\r
111 Value = *Sem;\r
112 } while (InterlockedCompareExchange32 (\r
053e878b
MK
113 (UINT32 *)Sem,\r
114 Value,\r
115 (UINT32)-1\r
529a5a86 116 ) != Value);\r
053e878b 117\r
529a5a86
MK
118 return Value;\r
119}\r
120\r
/**
  Wait all APs to performs an atomic compare exchange operation to release semaphore.

  Each AP signals completion by releasing the BSP's Run semaphore once;
  this routine consumes exactly NumberOfAPs such signals before returning.

  @param NumberOfAPs  AP number

**/
VOID
WaitForAllAPs (
  IN UINTN  NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    // Blocks until one AP posts to the BSP's Run semaphore.
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}
139\r
140/**\r
141 Performs an atomic compare exchange operation to release semaphore\r
142 for each AP.\r
143\r
144**/\r
145VOID\r
146ReleaseAllAPs (\r
147 VOID\r
148 )\r
149{\r
053e878b 150 UINTN Index;\r
529a5a86 151\r
70911f1f 152 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
51dd408a 153 if (IsPresentAp (Index)) {\r
ed3d5ecb 154 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);\r
529a5a86
MK
155 }\r
156 }\r
157}\r
158\r
/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param Exceptions  CPU Arrival exception flags - CPUs in the matching
                     hardware states (delayed / blocked / SMI-disabled) are
                     excused from the check.

  @retval TRUE  if all CPUs have checked in.
  @retval FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Fast path: arrival counter already equals the CPU count, so everyone
  // is in and the per-CPU scan can be skipped.
  //
  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData       = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Only inspect CPUs that exist (valid APIC ID) but have not marked
    // themselves Present for this SMI.
    //
    if (!(*(CpuData[Index].Present)) && (ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
      // Excused: hardware reports the SMI is delayed on this CPU.
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0)) {
        continue;
      }

      // Excused: hardware reports the CPU is in a blocked state.
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0)) {
        continue;
      }

      // Excused: SMIs are disabled on this CPU (note: register polarity is
      // as reported by SmmCpuFeaturesGetSmmRegister for SmmRegSmmEnable).
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0)) {
        continue;
      }

      // A normal, non-excused CPU is still missing.
      return FALSE;
    }
  }

  return TRUE;
}
205\r
12c66382
ED
/**
  Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL

  Checks the three-level LMCE enable chain: capability (MCG_CAP.MCG_LMCE_P),
  firmware opt-in (FEATURE_CONTROL.LmceOn), and OS opt-in (MCG_EXT_CTL.LMCE_EN).

  @retval TRUE   Os enable lmce.
  @retval FALSE  Os not enable lmce.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  //
  // Probe capability first: reading MCG_EXT_CTL is only meaningful (and
  // safe) when the LMCE-present bit is set.
  //
  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN)(McgExtCtrl.Bits.LMCE_EN == 1);
}
235\r
/**
  Return if Local machine check exception signaled.

  Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was
  delivered to only the logical processor.

  @retval TRUE   LMCE was signaled.
  @retval FALSE  LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  // Read LMCE_S from MCG_STATUS; callers gate this behind mMachineCheckSupported.
  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN)(McgStatus.Bits.LMCE_S == 1);
}
529a5a86
MK
256\r
/**
  Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before
  entering SMM, except SMI disabled APs.

  Two-phase wait: a first timed poll for natural arrival, then (if CPUs are
  still missing) SMI IPIs are sent to the stragglers followed by a second
  timed poll. The first wait is cut short when an OS-enabled LMCE has been
  signaled on this CPU.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Only touch the machine-check MSRs when the platform reported support.
  //
  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);
       )
  {
    mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
    if (mSmmMpSyncData->AllApArrivedWithException) {
      break;
    }

    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer);
         )
    {
      mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
      if (mSmmMpSyncData->AllApArrivedWithException) {
        break;
      }

      CpuPause ();
    }
  }

  return;
}
351\r
529a5a86
MK
/**
  Replace OS MTRR's with SMI MTRR's.

  Disables SMRR protection first, then programs the full MTRR set that was
  captured into gSmiMtrrs for use during SMM.

  @param CpuIndex  Processor Index (currently unused by the body; kept for
                   interface compatibility with callers)

**/
VOID
ReplaceOSMtrrs (
  IN UINTN  CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRRs registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}
370\r
51dd408a
ED
/**
  Check whether task has been finished by all APs.

  @param BlockMode  Whether did it in block mode or non-block mode.
                    TRUE: block on each AP's Busy lock until it is free.
                    FALSE: fail fast on the first AP that is still busy.

  @retval TRUE   Task has been finished by all APs.
  @retval FALSE  Task not has been finished by all APs (non-block mode only).

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN  BlockMode
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore BSP and APs which not call in SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      //
      // Acquire-then-release: blocks until the AP drops its Busy lock,
      // proving its scheduled procedure has completed.
      //
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        // AP still busy; report without waiting.
        return FALSE;
      }
    }
  }

  return TRUE;
}
409\r
/**
  Check whether it is an present AP.

  @param CpuIndex  The AP index which calls this function.

  @retval TRUE   It's a present AP.
  @retval FALSE  This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  // An AP is "present" when it is not the currently-executing (BSP) CPU
  // and it has set its Present flag for this SMI session.
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}
427\r
51dd408a
ED
/**
  Clean up the status flags used during executing the procedure.

  Decrements the token's running-AP count; the last AP to finish releases
  the token's spin lock so a waiter on the token can proceed. The per-CPU
  token pointer is always cleared.

  @param CpuIndex  The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  // Only the final AP (count reaches zero) signals completion.
  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}
449\r
/**
  Free the tokens in the maintained list.

  Tokens are not actually freed; the list is recycled by rewinding the
  free-token cursor, so the next SMI reuses the same token storage.

**/
VOID
ResetTokens (
  VOID
  )
{
  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}
464\r
529a5a86
MK
/**
  SMI handler for BSP.

  Orchestrates the whole SMI session on the BSP side: flags SMM entry,
  optionally rendezvouses all APs (traditional sync mode or when MTRRs must
  be reprogrammed), runs the SMM Foundation entry point, then walks all APs
  through the exit handshake and resets the shared sync state. Every
  WaitForAllAPs/ReleaseAllAPs pair below must stay matched with the
  corresponding steps in APHandler -- the two functions form one protocol.

  @param CpuIndex  BSP processor Index
  @param SyncMode  SMM MP sync mode

**/
VOID
BSPHandler (
  IN UINTN              CpuIndex,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending none-block tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if ((SyncMode != SmmCpuSyncModeTradition) && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount                        = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }

      // PresentCount includes the BSP, hence ">" rather than ">=".
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter       = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
702\r
/**
  SMI handler for AP.

  AP-side counterpart of BSPHandler: waits for the BSP to enter SMM (sending
  it an SMI IPI if it times out), participates in the optional MTRR
  save/program rendezvous, then services procedures scheduled via the Run
  semaphore until the BSP signals SMM exit, and finally walks the exit
  handshake. The semaphore signal/wait sequence must stay in lock-step with
  BSPHandler.

  @param CpuIndex  AP processor Index.
  @param ValidSmi  Indicates that current SMI is a valid SMI or not.
  @param SyncMode  SMM MP sync mode.

**/
VOID
APHandler (
  IN UINTN              CpuIndex,
  IN BOOLEAN            ValidSmi,
  IN SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64      Timer;
  UINTN       BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS  ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       )
  {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           )
      {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        // (decrement the arrival counter so this AP is not counted in).
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if ((SyncMode == SmmCpuSyncModeTradition) || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure)(
                                                                    (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                                                                    );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}
905\r
/**
  Create 4G PageTable in SMRAM.

  Builds a PAE-style identity map of the low 4GB: one PDPT page plus four
  page-directory pages of 2MB large-page entries. When the SMM stack guard
  is enabled, the 2MB regions covering the per-CPU stacks are broken into
  4KB page tables so each stack's guard page can be marked non-present.
  Optionally (PcdNullPointerDetectionPropertyMask BIT1) page 0 is hidden.

  @param[in] Is32BitPageTable  Whether the page table is 32-bit PAE
  @return PageTable Address

**/
UINT32
Gen4GPageTable (
  IN BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    // One 4KB page table per 2MB region that overlaps the stack array.
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }

  //
  // Allocate the page table: 1 PDPT + 4 PDs + the split 4K tables.
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte       = (UINT64 *)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }

  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries: 2048 x 2MB large pages = 4GB identity map.
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64 *)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    // First split 4K table lives right after the 5 fixed pages.
    Pages     = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      // Locate the PD for this region and repoint its entry at a 4K table.
      Pte                                             = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte         = (UINT64 *)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += (mSmmStackSize + mSmmShadowStackSize);
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }

        PageAddress += EFI_PAGE_SIZE;
      }

      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte    = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte         = (UINT64 *)Pages;
      PageAddress = 0;
      Pte[0]      = PageAddress | mAddressEncMask; // Hide page 0 but present left
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index]   = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
1035\r
51dd408a
ED
1036/**\r
1037 Checks whether the input token is the current used token.\r
1038\r
1039 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or\r
1040 BroadcastProcedure.\r
1041\r
1042 @retval TRUE The input token is the current used token.\r
1043 @retval FALSE The input token is not the current used token.\r
1044**/\r
1045BOOLEAN\r
1046IsTokenInUse (\r
053e878b 1047 IN SPIN_LOCK *Token\r
51dd408a
ED
1048 )\r
1049{\r
053e878b
MK
1050 LIST_ENTRY *Link;\r
1051 PROCEDURE_TOKEN *ProcToken;\r
51dd408a
ED
1052\r
1053 if (Token == NULL) {\r
1054 return FALSE;\r
1055 }\r
1056\r
1057 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);\r
d84f090f
ED
1058 //\r
1059 // Only search used tokens.\r
1060 //\r
1061 while (Link != gSmmCpuPrivate->FirstFreeToken) {\r
51dd408a
ED
1062 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);\r
1063\r
d84f090f 1064 if (ProcToken->SpinLock == Token) {\r
51dd408a
ED
1065 return TRUE;\r
1066 }\r
1067\r
1068 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);\r
1069 }\r
1070\r
1071 return FALSE;\r
1072}\r
1073\r
/**
  Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.

  Carves out one chunk of PcdCpuSmmMpTokenCountPerChunk tokens, initializes
  each token's spin lock, and appends all of them to gSmmCpuPrivate->TokenList.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            Index;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  PROCEDURE_TOKEN  *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  //
  // A zero chunk size would make the token pool unable to grow; treat it as
  // a fatal configuration error.
  //
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }

  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Separate the Spin_lock and Proc_token because the alignment requires by Spin_Lock.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    //
    // Each token references its own SpinLockSize-sized slot in SpinLockBuffer.
    //
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock       = SpinLock;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}
1124\r
/**
  Get the free token.

  If no free token, allocate new tokens then return the free one.

  @param RunningApsCount The Running Aps count for this token.

  @retval return the first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32  RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  //
  // If FirstFreeToken meets the end of token list, enlarge the token list.
  // Set FirstFreeToken to the first free token.
  //
  if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
    gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
  }

  //
  // Pop the first free token and advance FirstFreeToken past it.
  //
  NewToken                       = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
  gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);

  //
  // Hold the token's spin lock while the dispatched procedure is in flight;
  // IsApReady() reports EFI_NOT_READY until this lock is released.
  //
  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}
1158\r
1159/**\r
1160 Checks status of specified AP.\r
1161\r
1162 This function checks whether the specified AP has finished the task assigned\r
1163 by StartupThisAP(), and whether timeout expires.\r
1164\r
1165 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or\r
1166 BroadcastProcedure.\r
1167\r
1168 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().\r
1169 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.\r
1170**/\r
1171EFI_STATUS\r
1172IsApReady (\r
053e878b 1173 IN SPIN_LOCK *Token\r
51dd408a
ED
1174 )\r
1175{\r
1176 if (AcquireSpinLockOrFail (Token)) {\r
1177 ReleaseSpinLock (Token);\r
1178 return EFI_SUCCESS;\r
1179 }\r
1180\r
1181 return EFI_NOT_READY;\r
1182}\r
1183\r
529a5a86
MK
/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in,out]   ProcArguments            The parameter to pass to the procedure
  @param[in]       Token                    This is an optional parameter that allows the caller to execute the
                                            procedure in a blocking or non-blocking fashion. If it is NULL the
                                            call is blocking, and the call will not return until the AP has
                                            completed the procedure. If the token is not NULL, the call will
                                            return immediately. The caller can check whether the procedure has
                                            completed with CheckOnProcedure or WaitForProcedure.
  @param[in]       TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                            execution of Procedure, either for blocking or non-blocking mode.
                                            Zero means infinity. If the timeout expires before all APs return
                                            from Procedure, then Procedure on the failed APs is terminated. If
                                            the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                            If the timeout expires in non-blocking mode, the timeout determined
                                            can be through CheckOnProcedure or WaitForProcedure.
                                            Note that timeout support is optional. Whether an implementation
                                            supports this feature can be determined via the Attributes data
                                            member.
  @param[in,out]   CpuStatus                This optional pointer may be used to get the status code returned
                                            by Procedure when it completes execution on the target AP, or with
                                            EFI_TIMEOUT if the Procedure fails to complete within the optional
                                            timeout. The implementation will update this variable with
                                            EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2  Procedure,
  IN      UINTN              CpuIndex,
  IN OUT  VOID               *ProcArguments OPTIONAL,
  IN      MM_COMPLETION      *Token,
  IN      UINTN              TimeoutInMicroseconds,
  IN OUT  EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  //
  // Validate the target: must be an in-range index, not the currently
  // executing (BSP) CPU, and must have a valid APIC ID.
  //
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }

  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // The target AP must have checked into this SMI (Present flag set).
  //
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }

    return EFI_INVALID_PARAMETER;
  }

  //
  // A non-zero timeout is only allowed when the timeout attribute is advertised.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Take the AP's BUSY lock while publishing the procedure, its argument,
  // token, and status pointer.
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    if (Token != &mSmmStartupThisApToken) {
      //
      // When Token points to mSmmStartupThisApToken, this routine is called
      // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
      //
      // In this case, caller wants to startup AP procedure in non-blocking
      // mode and cannot get the completion status from the Token because there
      // is no way to return the Token to caller from SmmStartupThisAp().
      // Caller needs to use its implementation specific way to query the completion status.
      //
      // There is no need to allocate a token for such case so the 3 overheads
      // can be avoided:
      // 1. Call AllocateTokenBuffer() when there is no free token.
      // 2. Get a free token from the token buffer.
      // 3. Call ReleaseToken() in APHandler().
      //
      ProcToken                               = GetFreeToken (1);
      mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
      *Token                                  = (MM_COMPLETION)ProcToken->SpinLock;
    }
  }

  //
  // Pre-set the caller-visible status to EFI_NOT_READY before the AP starts.
  //
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  //
  // Signal the AP's Run semaphore to kick off the procedure.
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    //
    // Blocking mode: re-acquiring BUSY only succeeds after the AP releases
    // it, i.e. after the procedure has completed.
    //
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}
1310\r
/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.


  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }

  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // First pass: count the present APs and verify each one is schedulable
  // (not being removed, BUSY lock not held) before committing anything.
  //
  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }

      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  //
  // Non-blocking mode: hand the token's spin lock back to the caller as the
  // completion handle. The RunningApCount starts at mMaxNumberOfCpus and is
  // decremented below for every non-present slot.
  //
  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token    = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY should be acquired.
  //
  // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.
  // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not
  // block mode.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Second pass: publish the procedure/argument/token for every present AP.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }

      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor(AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}
1446\r
1447/**\r
1448 ISO C99 6.5.2.2 "Function calls", paragraph 9:\r
1449 If the function is defined with a type that is not compatible with\r
1450 the type (of the expression) pointed to by the expression that\r
1451 denotes the called function, the behavior is undefined.\r
1452\r
1453 So add below wrapper function to convert between EFI_AP_PROCEDURE\r
1454 and EFI_AP_PROCEDURE2.\r
1455\r
1456 Wrapper for Procedures.\r
1457\r
1458 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.\r
1459\r
1460**/\r
1461EFI_STATUS\r
1462EFIAPI\r
1463ProcedureWrapper (\r
053e878b 1464 IN VOID *Buffer\r
51dd408a
ED
1465 )\r
1466{\r
053e878b 1467 PROCEDURE_WRAPPER *Wrapper;\r
51dd408a
ED
1468\r
1469 Wrapper = Buffer;\r
1470 Wrapper->Procedure (Wrapper->ProcedureArgument);\r
1471\r
529a5a86
MK
1472 return EFI_SUCCESS;\r
1473}\r
1474\r
717fb604
JY
1475/**\r
1476 Schedule a procedure to run on the specified CPU in blocking mode.\r
1477\r
1478 @param[in] Procedure The address of the procedure to run\r
1479 @param[in] CpuIndex Target CPU Index\r
1480 @param[in, out] ProcArguments The parameter to pass to the procedure\r
1481\r
1482 @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
1483 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
1484 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
1485 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
1486 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
1487\r
1488**/\r
1489EFI_STATUS\r
1490EFIAPI\r
1491SmmBlockingStartupThisAp (\r
053e878b
MK
1492 IN EFI_AP_PROCEDURE Procedure,\r
1493 IN UINTN CpuIndex,\r
1494 IN OUT VOID *ProcArguments OPTIONAL\r
717fb604
JY
1495 )\r
1496{\r
51dd408a
ED
1497 PROCEDURE_WRAPPER Wrapper;\r
1498\r
053e878b 1499 Wrapper.Procedure = Procedure;\r
51dd408a
ED
1500 Wrapper.ProcedureArgument = ProcArguments;\r
1501\r
1502 //\r
1503 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.\r
1504 //\r
1505 return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);\r
717fb604
JY
1506}\r
1507\r
1508/**\r
1509 Schedule a procedure to run on the specified CPU.\r
1510\r
1511 @param Procedure The address of the procedure to run\r
1512 @param CpuIndex Target CPU Index\r
1513 @param ProcArguments The parameter to pass to the procedure\r
1514\r
1515 @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
1516 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
1517 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
1518 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
1519 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
1520\r
1521**/\r
1522EFI_STATUS\r
1523EFIAPI\r
1524SmmStartupThisAp (\r
053e878b
MK
1525 IN EFI_AP_PROCEDURE Procedure,\r
1526 IN UINTN CpuIndex,\r
1527 IN OUT VOID *ProcArguments OPTIONAL\r
717fb604
JY
1528 )\r
1529{\r
053e878b 1530 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;\r
51dd408a
ED
1531 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;\r
1532\r
1533 //\r
1534 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.\r
1535 //\r
1536 return InternalSmmStartupThisAp (\r
053e878b
MK
1537 ProcedureWrapper,\r
1538 CpuIndex,\r
1539 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],\r
1540 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,\r
1541 0,\r
1542 NULL\r
1543 );\r
717fb604
JY
1544}\r
1545\r
f45f2d4a 1546/**\r
3eed6dda 1547 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.\r
f45f2d4a
JY
1548 They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.\r
1549\r
1550 NOTE: It might not be appreciated in runtime since it might\r
1551 conflict with OS debugging facilities. Turn them off in RELEASE.\r
1552\r
1553 @param CpuIndex CPU Index\r
1554\r
1555**/\r
1556VOID\r
1557EFIAPI\r
1558CpuSmmDebugEntry (\r
1559 IN UINTN CpuIndex\r
1560 )\r
1561{\r
053e878b 1562 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
7367cc6c 1563\r
f45f2d4a 1564 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
053e878b 1565 ASSERT (CpuIndex < mMaxNumberOfCpus);\r
3eed6dda 1566 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
f45f2d4a
JY
1567 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1568 AsmWriteDr6 (CpuSaveState->x86._DR6);\r
1569 AsmWriteDr7 (CpuSaveState->x86._DR7);\r
1570 } else {\r
1571 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);\r
1572 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);\r
1573 }\r
1574 }\r
1575}\r
1576\r
1577/**\r
3eed6dda 1578 This function restores DR6 & DR7 to SMM save state.\r
f45f2d4a
JY
1579\r
1580 NOTE: It might not be appreciated in runtime since it might\r
1581 conflict with OS debugging facilities. Turn them off in RELEASE.\r
1582\r
1583 @param CpuIndex CPU Index\r
1584\r
1585**/\r
1586VOID\r
1587EFIAPI\r
1588CpuSmmDebugExit (\r
1589 IN UINTN CpuIndex\r
1590 )\r
1591{\r
053e878b 1592 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
f45f2d4a
JY
1593\r
1594 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
053e878b 1595 ASSERT (CpuIndex < mMaxNumberOfCpus);\r
3eed6dda 1596 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
f45f2d4a
JY
1597 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1598 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
1599 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();\r
1600 } else {\r
1601 CpuSaveState->x64._DR7 = AsmReadDr7 ();\r
1602 CpuSaveState->x64._DR6 = AsmReadDr6 ();\r
1603 }\r
1604 }\r
1605}\r
1606\r
529a5a86
MK
/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param CpuIndex CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user register Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }

      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method:
            // first CPU to swap BspIndex from -1 to its own index wins.
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}
1784\r
51dd408a
ED
1785/**\r
1786 Allocate buffer for SpinLock and Wrapper function buffer.\r
1787\r
1788**/\r
1789VOID\r
1790InitializeDataForMmMp (\r
1791 VOID\r
1792 )\r
1793{\r
1794 gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
1795 ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);\r
1796\r
1797 InitializeListHead (&gSmmCpuPrivate->TokenList);\r
b948a496 1798\r
3fdc47c6 1799 gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();\r
51dd408a
ED
1800}\r
1801\r
1d648531
JF
/**
  Allocate buffer for all semaphores and spin locks.

  One contiguous page-backed block is carved into per-semaphore slots of
  GetSpinLockProperties() bytes each: first the global semaphores, then the
  per-CPU semaphore arrays (Busy, Run, Present).

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  //
  // One SemaphoreSize-byte slot per pointer-sized member of each semaphore
  // structure; per-CPU semaphores get one slot per member per processor.
  //
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((DEBUG_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages          = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  //
  // Global semaphores occupy the first GlobalSemaphoresSize bytes, one slot each.
  //
  SemaphoreAddr                                   = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                                  += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                 = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  //
  // Per-CPU semaphores follow: each member is an array of ProcessorCount slots.
  //
  SemaphoreAddr                          = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr                         += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  //
  // Publish the global locks through their module-level aliases.
  //
  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}
529a5a86
MK
1857\r
/**
  Initialize un-cacheable data.

  Lays out the CpuData and CandidateBsp arrays inside the pre-allocated
  mSmmMpSyncData buffer and wires each synchronization field to its slot in
  the semaphore block built by InitializeSmmCpuSemaphores().

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData      = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }

    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    //
    // Point the global synchronization fields at their dedicated semaphore
    // slots and reset them to the "no SMI in progress" state.
    //
    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (
      mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
      mSmmMpSyncData->AllCpusInSync != NULL
      );
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    mSmmMpSyncData->AllApArrivedWithException = FALSE;

    //
    // Each CPU's Busy/Run/Present fields map to its own mSemaphoreSize-byte
    // slot within the per-CPU semaphore arrays.
    //
    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}
1913\r
/**
  Initialize global data for MP synchronization.

  @param Stacks           Base address of SMI stack buffer for all processors.
  @param StackSize        Stack size for each processor in SMM.
  @param ShadowStackSize  Shadow Stack size for each processor in SMM.

  @return The CR3 value of the SMM page table created by SmmInitPageTable().

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                          Cr3;
  UINTN                           Index;
  UINT8                           *GdtTssTables;
  UINTN                           GdtTableStepSize;
  CPUID_VERSION_INFO_EDX          RegEdx;
  UINT32                          MaxExtendedFunction;
  CPUID_VIR_PHY_ADDRESS_SIZE_EAX  VirPhyAddressSize;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  // (one header plus one CpuData entry and one CandidateBsp flag per CPU;
  // the semaphores must already exist, so this follows the call above).
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA *)AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  // Fall back to a 36-bit physical address space when CPUID leaf
  // 80000008h (virtual/physical address size) is not available.
  //
  AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
  if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
  } else {
    VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
  }

  gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
  //
  // Clear the low 12 bits
  //
  gPhyMask &= 0xfffffffffffff000ULL;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  // (each CPU gets its own stack slice and its own GDT/TSS copy,
  // GdtTableStepSize bytes apart).
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID *)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
2007\r
2008/**\r
2009\r
2010 Register the SMM Foundation entry point.\r
2011\r
2012 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance\r
2013 @param SmmEntryPoint SMM Foundation EntryPoint\r
2014\r
2015 @retval EFI_SUCCESS Successfully to register SMM foundation entry point\r
2016\r
2017**/\r
2018EFI_STATUS\r
2019EFIAPI\r
2020RegisterSmmEntry (\r
2021 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,\r
2022 IN EFI_SMM_ENTRY_POINT SmmEntryPoint\r
2023 )\r
2024{\r
2025 //\r
2026 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.\r
2027 //\r
2028 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;\r
2029 return EFI_SUCCESS;\r
2030}\r
51dd408a
ED
2031\r
2032/**\r
2033\r
2034 Register the SMM Foundation entry point.\r
2035\r
2036 @param[in] Procedure A pointer to the code stream to be run on the designated target AP\r
2037 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2\r
2038 with the related definitions of\r
2039 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.\r
2040 If caller may pass a value of NULL to deregister any existing\r
2041 startup procedure.\r
073f2ced 2042 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is\r
51dd408a
ED
2043 run by the AP. It is an optional common mailbox between APs and\r
2044 the caller to share information\r
2045\r
2046 @retval EFI_SUCCESS The Procedure has been set successfully.\r
2047 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.\r
2048\r
2049**/\r
2050EFI_STATUS\r
2051RegisterStartupProcedure (\r
053e878b
MK
2052 IN EFI_AP_PROCEDURE Procedure,\r
2053 IN OUT VOID *ProcedureArguments OPTIONAL\r
51dd408a
ED
2054 )\r
2055{\r
053e878b 2056 if ((Procedure == NULL) && (ProcedureArguments != NULL)) {\r
51dd408a
ED
2057 return EFI_INVALID_PARAMETER;\r
2058 }\r
053e878b 2059\r
51dd408a
ED
2060 if (mSmmMpSyncData == NULL) {\r
2061 return EFI_NOT_READY;\r
2062 }\r
2063\r
2064 mSmmMpSyncData->StartupProcedure = Procedure;\r
2065 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;\r
2066\r
2067 return EFI_SUCCESS;\r
2068}\r