git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
UefiCpuPkg/PiSmmCpuDxeSmm: Pre-allocate PROCEDURE_TOKEN buffer
/** @file
SMM MP service implementation

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;
UINTN                        mSemaphoreSize;
SPIN_LOCK                    *mPFLock = NULL;
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;
BOOLEAN                      mMachineCheckSupported = FALSE;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32 *)Sem,
             Value,
             (UINT32)-1
             ) != Value);
  return Value;
}
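
//
// Illustrative sketch (informative comment, not part of this driver's flow):
// how the counting-semaphore primitives above pair up in the BSP/AP
// rendezvous. The ApCount name here is hypothetical; the real protocol
// lives in BSPHandler()/APHandler() below.
//
//   // AP side: signal arrival, then block on its own Run semaphore.
//   ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
//   WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
//
//   // BSP side: wait for ApCount arrival signals, release every AP, and
//   // freeze the arrival counter so late-comers cannot check in.
//   WaitForAllAPs (ApCount);
//   ReleaseAllAPs ();
//   LockdownSemaphore (mSmmMpSyncData->Counter);
//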

/**
  Wait for all APs to perform an atomic compare exchange operation that
  releases the semaphore.

  @param   NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN  NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData       = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Has the OS enabled LMCE in MSR_IA32_MCG_EXT_CTL?

  @retval TRUE   The OS has enabled LMCE.
  @retval FALSE  The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception was signaled.

  Indicates (when set) that a local machine check exception was generated,
  meaning the current machine-check event was delivered to only this
  logical processor.

  @retval TRUE   LMCE was signaled.
  @retval FALSE  LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}
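
//
// Summary of the arrival protocol above (informative comment, not code from
// the original): round 1 polls until timeout, an OS-enabled-and-signaled
// LMCE, or full arrival; only if some CPUs are still outside does round 2
// send SMI IPIs to the stragglers and poll again. The polling granularity
// comes from StartSyncTimer()/IsSyncTimerTimeout(); the platform-tunable
// timeout is expected to be a PCD (PcdCpuSmmApSyncTimeout, assuming the
// usual UefiCpuPkg configuration).
//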

/**
  Replace OS MTRR's with SMI MTRR's.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN  CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRRs registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param       BlockMode   Whether to check in blocking mode or non-blocking mode.

  @retval      TRUE        Task has been finished by all APs.
  @retval      FALSE       Task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN  BlockMode
  )
{
  UINTN  Index;

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    //
    // Ignore the BSP and APs which have not checked in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}

/**
  Check whether it is a present AP.

  @param   CpuIndex      The AP index which calls this function.

  @retval  TRUE           It's a present AP.
  @retval  FALSE          This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN  CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Clean up the status flags used while executing the procedure.

  @param   CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN  CpuIndex
  )
{
  PROCEDURE_TOKEN  *Token;

  Token = mSmmMpSyncData->CpuData[CpuIndex].Token;

  if (InterlockedDecrement (&Token->RunningApCount) == 0) {
    ReleaseSpinLock (Token->SpinLock);
  }

  mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
}

/**
  Reset the tokens in the maintained list.

**/
VOID
ResetTokens (
  VOID
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    ProcToken->RunningApCount = 0;
    ProcToken->Used           = FALSE;

    //
    // Check the spinlock status and release it if not released yet.
    //
    if (!AcquireSpinLockOrFail (ProcToken->SpinLock)) {
      DEBUG ((DEBUG_ERROR, "Risk::SpinLock still not released!"));
    }
    ReleaseSpinLock (ProcToken->SpinLock);

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN              CpuIndex,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset the tokens buffer.
  //
  ResetTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter       = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates that current SMI is a valid SMI or not.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN              CpuIndex,
  IN      BOOLEAN            ValidSmi,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID *)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseToken (CpuIndex);
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

}
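
//
// Informative summary of the BSP/AP handshake implemented by BSPHandler()
// and APHandler() above (comment added for clarity; traditional sync mode
// with MTRR configuration assumed):
//
//   BSP                                 each AP
//   ---                                 -------
//   SmmWaitForApArrival()               ReleaseSemaphore (Run[Bsp])  arrival
//   LockdownSemaphore (Counter)
//   WaitForAllAPs()   <---------------  (arrival signals)
//   ReleaseAllAPs()   --------------->  WaitForSemaphore (Run[Ap])
//   MtrrGetAllMtrrs()                   MtrrGetAllMtrrs()            backup
//
// Both sides then run ReplaceOSMtrrs(), SMI handlers execute on the BSP
// while APs loop on Run[Ap] serving SmmStartupThisAp() requests, and once
// InsideSmm goes FALSE both sides restore the OS MTRRs and exit in lockstep.
//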

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary  = 0;
  High2MBoundary = 0;
  PagesNeeded    = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64 *)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64 *)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages     = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64 *)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte         = (UINT64 *)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64 *)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64 *)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte         = (UINT64 *)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but present left
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
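
//
// Worked example of the mapping built above (informative comment): the five
// initial pages are 1 PDPT page plus 4 page-directory pages, covering 4GB
// with 2MB pages. For a linear address A below 4G:
//   PDPTE index = A[31:30]   (4 entries, 1GB each)
//   PDE   index = A[29:21]   (512 entries per directory, 2MB each)
// e.g. A = 0x08402000 selects Pdpte[0] and Pde[0x42], consistent with the
// (Index << 21) loop that identity-maps each 2MB region. Only when
// PcdCpuSmmStackGuard is set are selected 2MB regions split into 4K pages
// so that a single guard page per SMM stack can be marked non-present.
//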

/**
  Checks whether the input token is a token that is currently in use.

  @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
                   BroadcastProcedure.

  @retval TRUE   The input token is a token currently in use.
  @retval FALSE  The input token is not a token currently in use.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK  *Token
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->Used && ProcToken->SpinLock == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.

**/
VOID
AllocateTokenBuffer (
  VOID
  )
{
  UINTN            SpinLockSize;
  UINT32           TokenCountPerChunk;
  UINTN            ProcTokenSize;
  UINTN            Index;
  PROCEDURE_TOKEN  *ProcToken;
  SPIN_LOCK        *SpinLock;
  UINT8            *SpinLockBuffer;
  UINT8            *ProcTokenBuffer;

  SpinLockSize  = GetSpinLockProperties ();
  ProcTokenSize = sizeof (PROCEDURE_TOKEN);

  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Keep the SPIN_LOCK and PROCEDURE_TOKEN buffers separate because of the
  // alignment required by SPIN_LOCK.
  //
  SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (SpinLockBuffer != NULL);

  ProcTokenBuffer = AllocatePool (ProcTokenSize * TokenCountPerChunk);
  ASSERT (ProcTokenBuffer != NULL);

  for (Index = 0; Index < TokenCountPerChunk; Index++) {
    SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
    InitializeSpinLock (SpinLock);

    ProcToken                 = (PROCEDURE_TOKEN *)(ProcTokenBuffer + ProcTokenSize * Index);
    ProcToken->Signature      = PROCEDURE_TOKEN_SIGNATURE;
    ProcToken->SpinLock       = SpinLock;
    ProcToken->Used           = FALSE;
    ProcToken->RunningApCount = 0;

    InsertTailList (&gSmmCpuPrivate->TokenList, &ProcToken->Link);
  }
}

/**
  Find first free token in the allocated token list.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
FindFirstFreeToken (
  VOID
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (!ProcToken->Used) {
      return ProcToken;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return NULL;
}

/**
  Get the free token.

  If there is no free token, allocate new tokens and then return a free one.

  @param RunningApsCount  The number of APs that will run the procedure.

  @return The first free PROCEDURE_TOKEN.

**/
PROCEDURE_TOKEN *
GetFreeToken (
  IN UINT32  RunningApsCount
  )
{
  PROCEDURE_TOKEN  *NewToken;

  NewToken = FindFirstFreeToken ();
  if (NewToken == NULL) {
    AllocateTokenBuffer ();
    NewToken = FindFirstFreeToken ();
  }
  ASSERT (NewToken != NULL);

  NewToken->Used           = TRUE;
  NewToken->RunningApCount = RunningApsCount;
  AcquireSpinLock (NewToken->SpinLock);

  return NewToken;
}

/**
  Checks status of specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether timeout expires.

  @param[in]  Token             This parameter describes the token that was passed into DispatchProcedure or
                                BroadcastProcedure.

  @retval EFI_SUCCESS           Specified AP has finished task assigned by StartupThisAPs().
  @retval EFI_NOT_READY         Specified AP has not finished task and timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}
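
//
// Hedged usage sketch (informative comment, not part of this driver): the
// token returned by GetFreeToken() is an acquired SPIN_LOCK, so "procedure
// complete" is signaled by ReleaseToken() releasing it once RunningApCount
// drops to zero. A caller-side polling loop could look like the following;
// ExampleProc and the CPU index 1 are hypothetical.
//
//   MM_COMPLETION  Token;
//
//   InternalSmmStartupThisAp (ExampleProc, 1, NULL, &Token, 0, NULL);
//   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
//     CpuPause ();
//   }
//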

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in,out]   ProcArguments            The parameter to pass to the procedure
  @param[in]       Token                    This is an optional parameter that allows the caller to execute the
                                            procedure in a blocking or non-blocking fashion. If it is NULL the
                                            call is blocking, and the call will not return until the AP has
                                            completed the procedure. If the token is not NULL, the call will
                                            return immediately. The caller can check whether the procedure has
                                            completed with CheckOnProcedure or WaitForProcedure.
  @param[in]       TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                            execution of Procedure, either for blocking or non-blocking mode.
                                            Zero means infinity. If the timeout expires before all APs return
                                            from Procedure, then Procedure on the failed APs is terminated. If
                                            the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                            If the timeout expires in non-blocking mode, the timeout can be
                                            determined through CheckOnProcedure or WaitForProcedure.
                                            Note that timeout support is optional. Whether an implementation
                                            supports this feature can be determined via the Attributes data
                                            member.
  @param[in,out]   CpuStatus                This optional pointer may be used to get the status code returned
                                            by Procedure when it completes execution on the target AP, or with
                                            EFI_TIMEOUT if the Procedure fails to complete within the optional
                                            timeout. The implementation will update this variable with
                                            EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2  Procedure,
  IN      UINTN              CpuIndex,
  IN OUT  VOID               *ProcArguments OPTIONAL,
  IN      MM_COMPLETION      *Token,
  IN      UINTN              TimeoutInMicroseconds,
  IN OUT  EFI_STATUS         *CpuStatus
  )
{
  PROCEDURE_TOKEN  *ProcToken;

  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    ProcToken = GetFreeToken (1);
    mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}

/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2  Procedure,
  IN       UINTN              TimeoutInMicroseconds,
  IN OUT   VOID               *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION      *Token,
  IN OUT   EFI_STATUS         *CPUStatus
  )
{
  UINTN            Index;
  UINTN            CpuCount;
  PROCEDURE_TOKEN  *ProcToken;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
    *Token = (MM_COMPLETION)ProcToken->SpinLock;
  } else {
    ProcToken = NULL;
  }

  //
  // Make sure all BUSY locks are acquired.
  //
  // Because the code above already checked mSmmMpSyncData->CpuData[***].Busy
  // for each AP, the code below always uses AcquireSpinLock instead of
  // AcquireSpinLockOrFail, even in non-blocking mode.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2)Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (ProcToken != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = ProcToken;
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }

      //
      // Decrease the count to mark this processor(AP or BSP) as finished.
      //
      if (ProcToken != NULL) {
        WaitForSemaphore (&ProcToken->RunningApCount);
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}
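
//
// Hedged usage sketch (informative comment, not part of this driver): a
// non-blocking broadcast with a per-CPU status array. ExampleProc and
// MAX_CPUS are hypothetical names; per the PI spec note above, entries for
// excluded processors come back as EFI_NOT_STARTED.
//
//   MM_COMPLETION  Token;
//   EFI_STATUS     Status[MAX_CPUS];
//
//   InternalSmmStartupAllAPs (ExampleProc, 0, NULL, &Token, Status);
//   // ... do other BSP work, then poll for completion ...
//   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
//     CpuPause ();
//   }
//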

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  So add below wrapper function to convert between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in]  Buffer  Pointer to PROCEDURE_WRAPPER buffer.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure         = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  MM_COMPLETION  Token;

  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure         = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
           0,
           NULL
           );
}

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32 *)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate the buffer for the AP procedure wrapper functions, initialize the
  token list, and pre-allocate the PROCEDURE_TOKEN buffer.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);

  AllocateTokenBuffer ();
}
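//
// Illustrative note: ApWrapperFunc above holds one PROCEDURE_WRAPPER slot per
// CPU, so a caller-supplied procedure and its argument can be parked per-AP
// before dispatch, conceptually (the field names below are assumptions for
// illustration, not verified against the header here):
//
//   gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure         = UserProcedure;
//   gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = UserArgument;
//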

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize;
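  //
  // Layout note: GetSpinLockProperties() returns the safe size/alignment for
  // one lock (typically a processor cache line), and the slot counts are the
  // number of pointer fields in each semaphore structure, so every semaphore
  // lands in its own slot. Worked example (figures are illustrative only):
  // with a 64-byte slot, the 5 global fields consumed below need
  // 5 * 64 = 320 bytes, and with 4 CPUs the 3 per-CPU fields need
  // 3 * 4 * 64 = 768 bytes.
  //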
  DEBUG ((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
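    //
    // Note on the indexing above: CPU N's Busy/Run/Present flags live at
    // (array base + N * mSemaphoreSize), i.e. each per-CPU flag occupies its
    // own aligned slot, so one CPU spinning on its flags does not bounce the
    // cache line holding another CPU's flags.
    //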
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow Stack size for each processor in SMM.

  @return The CR3 value of the SMM page table.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize,
  IN UINTN       ShadowStackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;
  CPUID_VERSION_INFO_EDX    RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask  = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
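  //
  // Worked example of the mask math (illustrative): if CPUID.80000008h
  // reports 39 physical address bits, (1 << 39) - 1 yields a mask of bits
  // [38:0]; ANDing with ((1 << 48) - EFI_PAGE_SIZE), i.e. bits [47:12],
  // clears the 12 page-offset bits and caps the mask at the 48-bit virtual
  // address limit, leaving a mask over page-frame bits [38:12].
  //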

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }
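  //
  // Note: each CPU gets its own SMBASE, its own stack carved out of the
  // shared Stacks buffer at a (StackSize + ShadowStackSize) stride, and its
  // own GDT/TSS at a GdtTableStepSize stride, while the IDT and the CR3
  // page tables are shared by all CPUs.
  //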

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param This             Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param SmmEntryPoint    SMM Foundation EntryPoint

  @retval EFI_SUCCESS    The SMM Foundation entry point was successfully registered.

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**

  Register a startup procedure to be run on APs.

  @param[in]      Procedure            A pointer to the code stream to be run on the designated target AP
                                       of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
                                       with the related definitions of
                                       EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                       The caller may pass a value of NULL to deregister any existing
                                       startup procedure.
  @param[in,out]  ProcedureArguments   Allows the caller to pass a list of parameters to the code that is
                                       run by the AP. It is an optional common mailbox between APs and
                                       the caller to share information.

  @retval EFI_SUCCESS             The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER   The Procedure is NULL but ProcedureArguments is not NULL.
  @retval EFI_NOT_READY           The MP synchronization data has not been initialized yet.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE    Procedure,
  IN OUT VOID                *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }
  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}
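//
// Usage sketch (illustrative only; MyApStartupProc and mSharedMailbox are
// hypothetical caller-side names, not part of this driver):
//
//   VOID
//   EFIAPI
//   MyApStartupProc (
//     IN OUT VOID  *Buffer
//     )
//   {
//     // Runs on an AP; Buffer is the mailbox registered below.
//   }
//
//   Status = RegisterStartupProcedure (MyApStartupProc, &mSharedMailbox);
//   ...
//   Status = RegisterStartupProcedure (NULL, NULL);   // deregister
//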