UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
/** @file
SMM MP service implementation

Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;
UINTN                        mSemaphoreSize;
SPIN_LOCK                    *mPFLock = NULL;
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;
BOOLEAN                      mMachineCheckSupported = FALSE;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}

/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             (UINT32)-1
             ) != Value);
  return Value;
}
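
//
// Reading note: together, the three primitives above implement the counting
// semaphores used by the SMI rendezvous below. WaitForSemaphore() is the
// blocking decrement, ReleaseSemaphore() the increment, and
// LockdownSemaphore() freezes a counter at (UINT32)-1. ReleaseSemaphore()
// refuses to wrap a locked-down ((UINT32)-1) counter and returns 0 instead,
// which is how SmiRendezvous() detects that the BSP has already ended the
// synchronization for this SMI.
//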

/**
  Wait for all APs to perform an atomic compare exchange operation to release
  the semaphore.

  @param   NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN  NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}
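
//
// Note: WaitForAllAPs() drains the BSP's own Run semaphore; it pairs with the
// ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run) calls that each AP
// makes in APHandler(), consuming exactly one signal per AP.
//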

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE   If all CPUs have checked in.
  @retval   FALSE  If at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData       = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE   The OS has enabled LMCE.
  @retval FALSE  The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception (LMCE) has been signaled.

  Indicates (when set) that a local machine check exception was generated,
  i.e. the current machine-check event was delivered only to this logical
  processor.

  @retval TRUE   LMCE was signaled.
  @retval FALSE  LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given the timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except for SMI-disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}

/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN  CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param       BlockMode   Whether to check in blocking mode or non-blocking mode.

  @retval      TRUE        Task has been finished by all APs.
  @retval      FALSE       Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN  BlockMode
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore BSP and APs which have not called in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Check whether the given CPU is a present AP.

  @param  CpuIndex      The AP index which calls this function.

  @retval TRUE   It's a present AP.
  @retval FALSE  This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used during the execution of the procedure.

  @param  CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}
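
//
// GetFreeToken() pre-loads RunningApCount with the number of APs the procedure
// is dispatched to and leaves the token's SpinLock acquired; the last AP to
// pass through ReleaseToken() releases that SpinLock, which is exactly the
// condition IsApReady() polls for on behalf of CheckOnProcedure /
// WaitForProcedure callers.
//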

/**
  Reset the tokens in the maintained list.

**/
VOID
ResetTokens (
  VOID
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    ProcToken->RunningApCount = 0;
    ProcToken->Used           = FALSE;

    //
    // Check the spinlock status and release it if not released yet.
    //
    if (!AcquireSpinLockOrFail (ProcToken->SpinLock)) {
      DEBUG ((DEBUG_ERROR, "Risk::SpinLock still not released!"));
    }
    ReleaseSpinLock (ProcToken->SpinLock);

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  //
  // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
  //
  gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
}
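
//
// Performance note: because ResetTokens() recycles the whole list and rewinds
// FirstFreeToken on every SMI exit, GetFreeToken() can hand out the next token
// in constant time instead of scanning the list for an unused entry.
//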

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN              CpuIndex,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter       = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates that current SMI is a valid SMI or not.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN              CpuIndex,
  IN      BOOLEAN            ValidSmi,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64 *)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64 *)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64 *)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64 *)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0; leave the remaining pages present
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
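
//
// Layout note for the "5 + PagesNeeded" allocation above: page 0 is the PAE
// PDPTE page (only 4 entries used), pages 1-4 are page directories whose PDEs
// each map a 2MB page, so 4 * 512 * 2MB covers the full 4GB. The PagesNeeded
// extra pages become 4KB page tables only when PcdCpuSmmStackGuard is set,
// splitting the 2MB regions that cover the SMM stacks so each stack's guard
// page can be marked not-present.
//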

/**
  Checks whether the input token is the currently used token.

  @param[in]  Token      This parameter describes the token that was passed into DispatchProcedure or
                         BroadcastProcedure.

  @retval TRUE           The input token is the currently used token.
  @retval FALSE          The input token is not the currently used token.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK  *Token
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->Used && ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.

  @return First token of the token buffer.
**/
LIST_ENTRY *
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            Index;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  PROCEDURE_TOKEN  *ProcTokens;

  SpinLockSize = GetSpinLockProperties ();

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Separate the SPIN_LOCK and PROCEDURE_TOKEN buffers because of the
  // alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
  ASSERT (ProcTokens != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcTokens[Index].Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcTokens[Index].SpinLock       = SpinLock;
    ProcTokens[Index].Used           = FALSE;
    ProcTokens[Index].RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
  }

  return &ProcTokens[0].Link;
}

/**
  Get a free token.

  If there is no free token, allocate a new token chunk and return the first
  free token from it.

  @param RunningApsCount    The number of running APs for this token.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32  RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  //
  // If FirstFreeToken meets the end of token list, enlarge the token list.
  // Set FirstFreeToken to the first free token.
  //
  if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
    gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
  }
  NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
  gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);

  NewToken->Used           = TRUE;
  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}
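
//
// Illustrative call pattern (this mirrors the real call site in
// InternalSmmStartupThisAp() below): the dispatcher takes a token for one AP,
// publishes its SpinLock as the caller's completion handle, and the AP
// releases it via ReleaseToken():
//
//   ProcToken = GetFreeToken (1);                  // SpinLock now held
//   mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
//   *Token = (MM_COMPLETION)ProcToken->SpinLock;   // caller polls IsApReady()
//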

/**
  Checks status of specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether timeout expires.

  @param[in]  Token             This parameter describes the token that was passed into DispatchProcedure or
                                BroadcastProcedure.

  @retval EFI_SUCCESS           Specified AP has finished task assigned by StartupThisAPs().
  @retval EFI_NOT_READY         Specified AP has not finished task and timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in,out]   ProcArguments            The parameter to pass to the procedure
  @param[in]       Token                    This is an optional parameter that allows the caller to execute the
                                            procedure in a blocking or non-blocking fashion. If it is NULL the
                                            call is blocking, and the call will not return until the AP has
                                            completed the procedure. If the token is not NULL, the call will
                                            return immediately. The caller can check whether the procedure has
                                            completed with CheckOnProcedure or WaitForProcedure.
  @param[in]       TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                            execution of Procedure, either for blocking or non-blocking mode.
                                            Zero means infinity. If the timeout expires before all APs return
                                            from Procedure, then Procedure on the failed APs is terminated. If
                                            the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                            If the timeout expires in non-blocking mode, the timeout can be
                                            determined through CheckOnProcedure or WaitForProcedure.
                                            Note that timeout support is optional. Whether an implementation
                                            supports this feature can be determined via the Attributes data
                                            member.
  @param[in,out]   CpuStatus                This optional pointer may be used to get the status code returned
                                            by Procedure when it completes execution on the target AP, or with
                                            EFI_TIMEOUT if the Procedure fails to complete within the optional
                                            timeout. The implementation will update this variable with
                                            EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2  Procedure,
  IN      UINTN              CpuIndex,
  IN OUT  VOID               *ProcArguments OPTIONAL,
  IN      MM_COMPLETION      *Token,
  IN      UINTN              TimeoutInMicroseconds,
  IN OUT  EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    ProcToken = GetFreeToken (1);
    mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}
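
//
// When Token is NULL, the trailing Acquire/Release pair on the target's Busy
// lock is what makes the call blocking: Busy was acquired above before the AP
// was signaled, and the AP releases it in APHandler() only after the procedure
// returns and the status is stored, so the second acquire cannot succeed until
// the work is done.
//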

/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the loop above already checked mSmmMpSyncData->CpuData[***].Busy for each AP,
  // the code below always uses AcquireSpinLock instead of AcquireSpinLockOrFail, even for
  // non-blocking mode.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor (AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So add below wrapper function to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in]  Buffer        Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  MM_COMPLETION  Token;

  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
           0,
           NULL
           );
}
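
//
// Design note: PcdCpuSmmBlockStartupThisAp selects the legacy blocking
// behavior by passing a NULL token; otherwise a local Token that is never
// waited on is supplied, so the dispatch returns immediately and this wrapper
// simply does not observe completion (fire-and-forget).
//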

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  It is useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {
        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate buffer for SpinLock and Wrapper function buffer.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((DEBUG_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages          = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}
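
//
// Illustrative sketch (not part of the driver): how the allocation above is
// carved up. Each semaphore occupies its own SemaphoreSize-byte slot (the
// aligned span reported by GetSpinLockProperties()), so CPUs spinning on
// different semaphores never contend for the same cache line. The numbers
// below (a 64-byte slot, 5 global semaphores) are assumptions for the example
// only; ExampleSemaphoreSlot is a hypothetical helper.
//
STATIC
UINTN
ExampleSemaphoreSlot (
  IN UINTN  BlockBase,      // Start of the semaphore block
  IN UINTN  SemaphoreSize,  // One slot, e.g. 64 bytes
  IN UINTN  ArrayBase,      // Offset of this semaphore array within the block
  IN UINTN  CpuIndex        // 0 for a global semaphore
  )
{
  //
  // E.g. with SemaphoreSize == 64 and 5 global slots (GlobalSemaphoresSize ==
  // 320), CPU 2's Busy lock lives at BlockBase + 320 + 2 * 64 = BlockBase + 448.
  //
  return BlockBase + ArrayBase + (CpuIndex * SemaphoreSize);
}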

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}
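
//
// Illustrative sketch (not part of the driver): the single-allocation layout
// that InitializeMpSyncData() carves up above. For an assumed NumberOfCpus of
// 4 the buffer is laid out as:
//
//   [SMM_DISPATCHER_MP_SYNC_DATA][SMM_CPU_DATA_BLOCK x 4][BOOLEAN x 4]
//
// ExampleCarveSyncData is a hypothetical helper performing the same carving
// on any such buffer.
//
STATIC
VOID
ExampleCarveSyncData (
  IN  UINT8                        *Buffer,       // One allocation of mSmmMpSyncDataSize bytes
  IN  UINTN                        NumberOfCpus,
  OUT SMM_DISPATCHER_MP_SYNC_DATA  **SyncData
  )
{
  //
  // Header first, then the per-CPU data blocks, then the candidate-BSP flags.
  //
  *SyncData                 = (SMM_DISPATCHER_MP_SYNC_DATA *)Buffer;
  (*SyncData)->CpuData      = (SMM_CPU_DATA_BLOCK *)(Buffer + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
  (*SyncData)->CandidateBsp = (BOOLEAN *)((*SyncData)->CpuData + NumberOfCpus);
}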

/**
  Initialize global data for MP synchronization.

  @param Stacks           Base address of SMI stack buffer for all processors.
  @param StackSize        Stack size for each processor in SMM.
  @param ShadowStackSize  Shadow Stack size for each processor in SMM.

  @return The CR3 value (page table base address) to be used in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize,
  IN UINTN  ShadowStackSize
  )
{
  UINT32                  Cr3;
  UINTN                   Index;
  UINT8                   *GdtTssTables;
  UINTN                   GdtTableStepSize;
  CPUID_VERSION_INFO_EDX  RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask  = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
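
//
// Illustrative sketch (not part of the driver): the gPhyMask computation in
// InitializeMpServiceData() as a standalone helper. ExamplePhysicalAddressMask
// is a hypothetical name. For a CPU reporting 36 physical address bits in
// CPUID leaf 0x80000008 EAX[7:0]:
//
//   (1 << 36) - 1                = 0xFFFFFFFFF
//   (1ull << 48) - EFI_PAGE_SIZE = 0xFFFFFFFFF000
//   AND of the two               = 0xFFFFFF000
//
// i.e. the mask keeps only the page-frame bits of a physical address,
// clamped to 48 bits.
//
STATIC
UINT64
ExamplePhysicalAddressMask (
  IN UINT8  PhysicalAddressBits  // CPUID leaf 0x80000008, EAX[7:0]
  )
{
  UINT64  Mask;

  Mask  = LShiftU64 (1, PhysicalAddressBits) - 1;  // e.g. 0xFFFFFFFFF for 36 bits
  Mask &= (1ull << 48) - EFI_PAGE_SIZE;            // clear page-offset bits, clamp to 48 bits
  return Mask;                                     // e.g. 0xFFFFFF000
}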

/**
  Register the SMM Foundation entry point.

  @param This           Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param SmmEntryPoint  SMM Foundation EntryPoint

  @retval EFI_SUCCESS   The SMM Foundation entry point was registered successfully.

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**
  Register a startup procedure to be run on APs.

  @param[in]      Procedure           A pointer to the code stream to be run on the designated
                                      target AP of the system. Type EFI_AP_PROCEDURE is defined
                                      in the PI Specification with the related definitions of
                                      EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                      The caller may pass a value of NULL to deregister any
                                      existing startup procedure.
  @param[in,out]  ProcedureArguments  Allows the caller to pass a list of parameters to the code
                                      that is run by the AP. It is an optional common mailbox
                                      between APs and the caller to share information.

  @retval EFI_SUCCESS            The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER  The Procedure is NULL but ProcedureArguments is not NULL.
  @retval EFI_NOT_READY          The MP synchronization data has not been initialized yet.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN OUT VOID              *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }
  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}
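
//
// Illustrative sketch (not part of the driver): registering and later
// deregistering a startup procedure. ExampleApProcedure,
// ExampleUseStartupProcedure, and mExampleMailbox are hypothetical names for
// this sketch only.
//
STATIC volatile UINT32  mExampleMailbox = 0;

STATIC
VOID
EFIAPI
ExampleApProcedure (
  IN OUT VOID  *Buffer
  )
{
  //
  // Runs on an AP; Buffer is the shared mailbox passed at registration time.
  //
  (*(volatile UINT32 *)Buffer)++;
}

STATIC
VOID
ExampleUseStartupProcedure (
  VOID
  )
{
  //
  // Register: APs will run ExampleApProcedure with &mExampleMailbox.
  //
  RegisterStartupProcedure (ExampleApProcedure, (VOID *)&mExampleMailbox);

  //
  // Deregister: a NULL Procedure with a NULL argument clears the registration;
  // a NULL Procedure with a non-NULL argument returns EFI_INVALID_PARAMETER.
  //
  RegisterStartupProcedure (NULL, NULL);
}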