]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
UefiCpuPkg/PiSmmCpuDxeSmm: Avoid allocate Token every time
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / MpService.c
CommitLineData
529a5a86
MK
1/** @file\r
2SMM MP service implementation\r
3\r
3eb69b08 4Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r
241f9149
LD
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
0acd8697 7SPDX-License-Identifier: BSD-2-Clause-Patent\r
529a5a86
MK
8\r
9**/\r
10\r
11#include "PiSmmCpuDxeSmm.h"\r
12\r
//
// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                               gSmiMtrrs;
// Physical-address mask; NOTE(review): consumers are outside this chunk - confirm at use sites.
UINT64                                      gPhyMask;
// BSP/AP synchronization data shared for the SMI rendezvous; allocated elsewhere, NULL until then.
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
// Pre-allocated semaphore pool and the per-semaphore stride used to carve it up.
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
// Spin lock; presumably serializes the SMM page-fault path - TODO confirm at use sites.
SPIN_LOCK                                   *mPFLock = NULL;
// Traditional vs. relaxed AP sync mode consumed by BSPHandler()/APHandler().
SMM_CPU_SYNC_MODE                           mCpuSmmSyncMode;
// TRUE when machine check is supported; gates the LMCE MSR reads in SmmWaitForApArrival().
BOOLEAN                                     mMachineCheckSupported = FALSE;
26/**\r
27 Performs an atomic compare exchange operation to get semaphore.\r
28 The compare exchange operation must be performed using\r
29 MP safe mechanisms.\r
30\r
31 @param Sem IN: 32-bit unsigned integer\r
32 OUT: original integer - 1\r
33 @return Original integer - 1\r
34\r
35**/\r
36UINT32\r
37WaitForSemaphore (\r
38 IN OUT volatile UINT32 *Sem\r
39 )\r
40{\r
41 UINT32 Value;\r
42\r
43 do {\r
44 Value = *Sem;\r
45 } while (Value == 0 ||\r
46 InterlockedCompareExchange32 (\r
47 (UINT32*)Sem,\r
48 Value,\r
49 Value - 1\r
50 ) != Value);\r
51 return Value - 1;\r
52}\r
53\r
54\r
55/**\r
56 Performs an atomic compare exchange operation to release semaphore.\r
57 The compare exchange operation must be performed using\r
58 MP safe mechanisms.\r
59\r
60 @param Sem IN: 32-bit unsigned integer\r
61 OUT: original integer + 1\r
62 @return Original integer + 1\r
63\r
64**/\r
65UINT32\r
66ReleaseSemaphore (\r
67 IN OUT volatile UINT32 *Sem\r
68 )\r
69{\r
70 UINT32 Value;\r
71\r
72 do {\r
73 Value = *Sem;\r
74 } while (Value + 1 != 0 &&\r
75 InterlockedCompareExchange32 (\r
76 (UINT32*)Sem,\r
77 Value,\r
78 Value + 1\r
79 ) != Value);\r
80 return Value + 1;\r
81}\r
82\r
83/**\r
84 Performs an atomic compare exchange operation to lock semaphore.\r
85 The compare exchange operation must be performed using\r
86 MP safe mechanisms.\r
87\r
88 @param Sem IN: 32-bit unsigned integer\r
89 OUT: -1\r
90 @return Original integer\r
91\r
92**/\r
93UINT32\r
94LockdownSemaphore (\r
95 IN OUT volatile UINT32 *Sem\r
96 )\r
97{\r
98 UINT32 Value;\r
99\r
100 do {\r
101 Value = *Sem;\r
102 } while (InterlockedCompareExchange32 (\r
103 (UINT32*)Sem,\r
104 Value, (UINT32)-1\r
105 ) != Value);\r
106 return Value;\r
107}\r
108\r
109/**\r
110 Wait all APs to performs an atomic compare exchange operation to release semaphore.\r
111\r
112 @param NumberOfAPs AP number\r
113\r
114**/\r
115VOID\r
116WaitForAllAPs (\r
117 IN UINTN NumberOfAPs\r
118 )\r
119{\r
120 UINTN BspIndex;\r
121\r
122 BspIndex = mSmmMpSyncData->BspIndex;\r
123 while (NumberOfAPs-- > 0) {\r
ed3d5ecb 124 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
529a5a86
MK
125 }\r
126}\r
127\r
128/**\r
129 Performs an atomic compare exchange operation to release semaphore\r
130 for each AP.\r
131\r
132**/\r
133VOID\r
134ReleaseAllAPs (\r
135 VOID\r
136 )\r
137{\r
138 UINTN Index;\r
529a5a86 139\r
529a5a86 140 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
51dd408a 141 if (IsPresentAp (Index)) {\r
ed3d5ecb 142 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);\r
529a5a86
MK
143 }\r
144 }\r
145}\r
146\r
/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param Exceptions  CPU Arrival exception flags.

  @retval TRUE  if all CPUs the have checked in.
  @retval FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Fast path: the arrival counter already equals the CPU count, so
  // everyone is in and no per-CPU scan is needed.
  //
  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  //
  // Scan every valid CPU slot that has NOT flagged its presence. A missing
  // CPU is tolerated only when the caller's Exceptions mask covers the
  // SMM-register state (Delayed / Blocked / SMI-disabled) it reports.
  //
  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      //
      // This CPU is absent for a non-excepted reason.
      //
      return FALSE;
    }
  }

  return TRUE;
}
191\r
12c66382
ED
/**
  Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL

  @retval TRUE Os enable lmce.
  @retval FALSE Os not enable lmce.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  //
  // LMCE is reported enabled only when all three layers agree:
  // capability (MCG_CAP.MCG_LMCE_P), firmware opt-in
  // (IA32_FEATURE_CONTROL.LmceOn), and OS enable (MCG_EXT_CTL.LMCE_EN).
  // The read order matters: IA32_MCG_EXT_CTL is only read after
  // MCG_LMCE_P confirms support (per Intel SDM the MSR is not
  // architecturally present otherwise).
  //
  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}
221\r
222/**\r
7367cc6c 223 Return if Local machine check exception signaled.\r
12c66382 224\r
7367cc6c 225 Indicates (when set) that a local machine check exception was generated. This indicates that the current machine-check event was\r
12c66382
ED
226 delivered to only the logical processor.\r
227\r
228 @retval TRUE LMCE was signaled.\r
229 @retval FALSE LMCE was not signaled.\r
230\r
231**/\r
232BOOLEAN\r
233IsLmceSignaled (\r
234 VOID\r
235 )\r
236{\r
237 MSR_IA32_MCG_STATUS_REGISTER McgStatus;\r
238\r
239 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);\r
240 return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);\r
241}\r
529a5a86
MK
242\r
/**
  Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before
  entering SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Only touch the LMCE MSRs when machine check is supported; a signaled
  // local machine check lets the first sync loop below exit early.
  //
  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled();
  }

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
         ) {
      CpuPause ();
    }
  }

  return;
}
327\r
328\r
/**
  Replace OS MTRR's with SMI MTRR's.

  @param CpuIndex Processor Index (currently unused in the body; kept for
                  interface symmetry with the other per-CPU handlers)

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  //
  // Disable SMRR before rewriting the full MTRR set.
  // NOTE(review): presumably so the wholesale MtrrSetAllMtrrs below cannot
  // conflict with an active SMRR range - confirm in SmmCpuFeaturesLib.
  //
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRRs registers with the SMI-time settings captured in
  // gSmiMtrrs.
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}
347\r
51dd408a
ED
348/**\r
349 Wheck whether task has been finished by all APs.\r
350\r
351 @param BlockMode Whether did it in block mode or non-block mode.\r
352\r
353 @retval TRUE Task has been finished by all APs.\r
354 @retval FALSE Task not has been finished by all APs.\r
355\r
356**/\r
357BOOLEAN\r
358WaitForAllAPsNotBusy (\r
359 IN BOOLEAN BlockMode\r
360 )\r
361{\r
362 UINTN Index;\r
363\r
364 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
365 //\r
366 // Ignore BSP and APs which not call in SMM.\r
367 //\r
368 if (!IsPresentAp(Index)) {\r
369 continue;\r
370 }\r
371\r
372 if (BlockMode) {\r
373 AcquireSpinLock(mSmmMpSyncData->CpuData[Index].Busy);\r
374 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);\r
375 } else {\r
376 if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {\r
377 ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);\r
378 } else {\r
379 return FALSE;\r
380 }\r
381 }\r
382 }\r
383\r
384 return TRUE;\r
385}\r
386\r
387/**\r
388 Check whether it is an present AP.\r
389\r
390 @param CpuIndex The AP index which calls this function.\r
391\r
392 @retval TRUE It's a present AP.\r
393 @retval TRUE This is not an AP or it is not present.\r
394\r
395**/\r
396BOOLEAN\r
397IsPresentAp (\r
398 IN UINTN CpuIndex\r
399 )\r
400{\r
401 return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&\r
402 *(mSmmMpSyncData->CpuData[CpuIndex].Present));\r
403}\r
404\r
/**
  Check whether execute in single AP or all APs.

  Compare two Tokens used by different APs to know whether in StartAllAps call.

  Whether is an valid AP base on AP's Present flag.

  @retval TRUE IN StartAllAps call.
  @retval FALSE Not in StartAllAps call.

**/
BOOLEAN
InStartAllApsCall (
  VOID
  )
{
  UINTN  ApIndex;
  UINTN  ApIndex2;

  //
  // Locate the highest-indexed present AP holding a token, then the next
  // lower present AP holding a token. A StartAllAps call assigns the same
  // token to every AP, so the verdict is decided by comparing the first
  // such pair: equal tokens => StartAllAps, distinct => single-AP calls.
  //
  for (ApIndex = mMaxNumberOfCpus; ApIndex-- > 0;) {
    if (IsPresentAp (ApIndex) && (mSmmMpSyncData->CpuData[ApIndex].Token != NULL)) {
      for (ApIndex2 = ApIndex; ApIndex2-- > 0;) {
        if (IsPresentAp (ApIndex2) && (mSmmMpSyncData->CpuData[ApIndex2].Token != NULL)) {
          return mSmmMpSyncData->CpuData[ApIndex2].Token == mSmmMpSyncData->CpuData[ApIndex].Token;
        }
      }
    }
  }

  //
  // Fewer than two token-holding APs: cannot be an in-flight StartAllAps.
  //
  return FALSE;
}
436\r
437/**\r
438 Clean up the status flags used during executing the procedure.\r
439\r
440 @param CpuIndex The AP index which calls this function.\r
441\r
442**/\r
443VOID\r
444ReleaseToken (\r
445 IN UINTN CpuIndex\r
446 )\r
447{\r
448 UINTN Index;\r
449 BOOLEAN Released;\r
450\r
451 if (InStartAllApsCall ()) {\r
452 //\r
453 // In Start All APs mode, make sure all APs have finished task.\r
454 //\r
455 if (WaitForAllAPsNotBusy (FALSE)) {\r
456 //\r
457 // Clean the flags update in the function call.\r
458 //\r
459 Released = FALSE;\r
460 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
461 //\r
462 // Only In SMM APs need to be clean up.\r
463 //\r
464 if (mSmmMpSyncData->CpuData[Index].Present && mSmmMpSyncData->CpuData[Index].Token != NULL) {\r
465 if (!Released) {\r
466 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Token);\r
467 Released = TRUE;\r
468 }\r
469 mSmmMpSyncData->CpuData[Index].Token = NULL;\r
470 }\r
471 }\r
472 }\r
473 } else {\r
474 //\r
475 // In single AP mode.\r
476 //\r
477 if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {\r
478 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Token);\r
479 mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;\r
480 }\r
481 }\r
482}\r
483\r
/**
  Free the tokens in the maintained list.

**/
VOID
FreeTokens (
  VOID
  )
{
  LIST_ENTRY       *Link;
  PROCEDURE_TOKEN  *ProcToken;
  TOKEN_BUFFER     *TokenBuf;

  //
  // Only free the token buffer recorded in the OldTokenBufList
  // upon exiting SMI. Current token buffer stays allocated so
  // next SMI doesn't need to re-allocate.
  //
  gSmmCpuPrivate->UsedTokenNum = 0;

  Link = GetFirstNode (&gSmmCpuPrivate->OldTokenBufList);
  while (!IsNull (&gSmmCpuPrivate->OldTokenBufList, Link)) {
    TokenBuf = TOKEN_BUFFER_FROM_LINK (Link);

    //
    // RemoveEntryList returns the following node, so the walk continues
    // from there after both the buffer and its tracking node are freed.
    //
    Link = RemoveEntryList (&TokenBuf->Link);

    FreePool (TokenBuf->Buffer);
    FreePool (TokenBuf);
  }

  //
  // Drain the PROCEDURE_TOKEN tracking list as well.
  //
  while (!IsListEmpty (&gSmmCpuPrivate->TokenList)) {
    Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    RemoveEntryList (&ProcToken->Link);

    FreePool (ProcToken);
  }
}
523\r
529a5a86
MK
/**
  SMI handler for BSP.

  Orchestrates the whole SMI rendezvous: gathers APs (depending on sync
  mode), optionally coordinates MTRR save/replace/restore with them, runs
  the SMM Foundation entry point, then synchronizes the exit sequence.
  The Run-semaphore handshakes here must stay in lock-step with the
  mirror-image sequence in APHandler().

  @param CpuIndex BSP processor Index
  @param SyncMode SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending none-block tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs to exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount ++;
        }
      }
      //
      // PresentCount includes the BSP itself, hence ">" rather than ">=".
      //
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Clean the tokens buffer.
  //
  FreeTokens ();

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
762\r
/**
  SMI handler for AP.

  Mirror image of BSPHandler(): waits for the BSP to show up (with a
  two-round timeout and an SMI IPI nudge in between), joins the MTRR
  save/replace handshake when required, services scheduled procedures in
  a Run-semaphore loop, and finally walks the synchronized exit sequence.

  @param CpuIndex AP processor Index.
  @param ValidSmi Indicates that current SMI is a valid SMI or not.
                  (Not referenced in this body - NOTE(review): confirm
                  whether callers rely on it elsewhere.)
  @param SyncMode SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;
  EFI_STATUS     ProcedureStatus;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP by decrementing the
        // arrival counter it incremented on entry.
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

    ReleaseToken (CpuIndex);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

}
962\r
/**
  Create 4G PageTable in SMRAM.

  Layout of the base allocation: 1 PDPT page followed by 4 page-directory
  pages (hence the "5" below), covering 0..4GB with 2MB pages; PagesNeeded
  extra pages hold 4K page tables for the stack-guard range when
  PcdCpuSmmStackGuard is set.

  @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
  @return PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    //
    // One 4K page table per 2MB window in [Low2MBoundary, High2MBoundary].
    //
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries: identity-map 0..4GB with 2MB pages.
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      //
      // Point the covering PDE at a freshly carved 4K page table
      // (demoting this 2MB window from a large page).
      //
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress+= EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 but present left
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
1088\r
51dd408a
ED
1089/**\r
1090 Checks whether the input token is the current used token.\r
1091\r
1092 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or\r
1093 BroadcastProcedure.\r
1094\r
1095 @retval TRUE The input token is the current used token.\r
1096 @retval FALSE The input token is not the current used token.\r
1097**/\r
1098BOOLEAN\r
1099IsTokenInUse (\r
1100 IN SPIN_LOCK *Token\r
1101 )\r
1102{\r
1103 LIST_ENTRY *Link;\r
1104 PROCEDURE_TOKEN *ProcToken;\r
1105\r
1106 if (Token == NULL) {\r
1107 return FALSE;\r
1108 }\r
1109\r
1110 Link = GetFirstNode (&gSmmCpuPrivate->TokenList);\r
1111 while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {\r
1112 ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);\r
1113\r
1114 if (ProcToken->ProcedureToken == Token) {\r
1115 return TRUE;\r
1116 }\r
1117\r
1118 Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);\r
1119 }\r
1120\r
1121 return FALSE;\r
1122}\r
1123\r
/**
  create token and save it to the maintain list.

  Tokens are SPIN_LOCKs carved out of a pre-allocated chunk buffer
  (CurrentTokenBuf) instead of being pool-allocated one at a time; a chunk
  holds PcdCpuSmmMpTokenCountPerChunk tokens.  When the current chunk is
  exhausted, it is recorded in OldTokenBufList (kept for later freeing) and
  a fresh chunk is allocated.  The returned spin lock is handed back in the
  "acquired" state; it is presumably released by the AP when the dispatched
  procedure completes (release happens outside this function — see the AP
  dispatch path).

  @retval return the spin lock used as token.

**/
SPIN_LOCK *
CreateToken (
  VOID
  )
{
  PROCEDURE_TOKEN *ProcToken;
  SPIN_LOCK *CpuToken;
  UINTN SpinLockSize;
  TOKEN_BUFFER *TokenBuf;
  UINT32 TokenCountPerChunk;

  SpinLockSize = GetSpinLockProperties ();
  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);

  //
  // Current chunk is full: retire it and allocate a new one.
  //
  if (gSmmCpuPrivate->UsedTokenNum == TokenCountPerChunk) {
    DEBUG ((DEBUG_VERBOSE, "CpuSmm: No free token buffer, allocate new buffer!\n"));

    //
    // Record current token buffer for later free action usage.
    // Current used token buffer not in this list.
    //
    TokenBuf = AllocatePool (sizeof (TOKEN_BUFFER));
    ASSERT (TokenBuf != NULL);
    TokenBuf->Signature = TOKEN_BUFFER_SIGNATURE;
    TokenBuf->Buffer  = gSmmCpuPrivate->CurrentTokenBuf;

    InsertTailList (&gSmmCpuPrivate->OldTokenBufList, &TokenBuf->Link);

    gSmmCpuPrivate->CurrentTokenBuf = AllocatePool (SpinLockSize * TokenCountPerChunk);
    ASSERT (gSmmCpuPrivate->CurrentTokenBuf != NULL);
    gSmmCpuPrivate->UsedTokenNum = 0;
  }

  //
  // Carve the next token out of the current chunk (tokens are laid out
  // back-to-back at SpinLockSize intervals).
  //
  CpuToken = (SPIN_LOCK *)(gSmmCpuPrivate->CurrentTokenBuf + SpinLockSize * gSmmCpuPrivate->UsedTokenNum);
  gSmmCpuPrivate->UsedTokenNum++;

  //
  // Hand the token back already held, so IsApReady() reports
  // EFI_NOT_READY until it is released.
  //
  InitializeSpinLock (CpuToken);
  AcquireSpinLock (CpuToken);

  //
  // Track the token in the maintain list so IsTokenInUse() can find it.
  //
  ProcToken = AllocatePool (sizeof (PROCEDURE_TOKEN));
  ASSERT (ProcToken != NULL);
  ProcToken->Signature = PROCEDURE_TOKEN_SIGNATURE;
  ProcToken->ProcedureToken = CpuToken;

  InsertTailList (&gSmmCpuPrivate->TokenList, &ProcToken->Link);

  return CpuToken;
}
1178\r
1179/**\r
1180 Checks status of specified AP.\r
1181\r
1182 This function checks whether the specified AP has finished the task assigned\r
1183 by StartupThisAP(), and whether timeout expires.\r
1184\r
1185 @param[in] Token This parameter describes the token that was passed into DispatchProcedure or\r
1186 BroadcastProcedure.\r
1187\r
1188 @retval EFI_SUCCESS Specified AP has finished task assigned by StartupThisAPs().\r
1189 @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.\r
1190**/\r
1191EFI_STATUS\r
1192IsApReady (\r
1193 IN SPIN_LOCK *Token\r
1194 )\r
1195{\r
1196 if (AcquireSpinLockOrFail (Token)) {\r
1197 ReleaseSpinLock (Token);\r
1198 return EFI_SUCCESS;\r
1199 }\r
1200\r
1201 return EFI_NOT_READY;\r
1202}\r
1203\r
529a5a86
MK
/**
  Schedule a procedure to run on the specified CPU.

  @param[in]     Procedure                The address of the procedure to run
  @param[in]     CpuIndex                 Target CPU Index
  @param[in,out] ProcArguments            The parameter to pass to the procedure
  @param[in]     Token                    This is an optional parameter that allows the caller to execute the
                                          procedure in a blocking or non-blocking fashion. If it is NULL the
                                          call is blocking, and the call will not return until the AP has
                                          completed the procedure. If the token is not NULL, the call will
                                          return immediately. The caller can check whether the procedure has
                                          completed with CheckOnProcedure or WaitForProcedure.
  @param[in]     TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                          execution of Procedure, either for blocking or non-blocking mode.
                                          Zero means infinity. If the timeout expires before all APs return
                                          from Procedure, then Procedure on the failed APs is terminated. If
                                          the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                          If the timeout expires in non-blocking mode, the timeout determined
                                          can be through CheckOnProcedure or WaitForProcedure.
                                          Note that timeout support is optional. Whether an implementation
                                          supports this feature can be determined via the Attributes data
                                          member.
  @param[in,out] CpuStatus                This optional pointer may be used to get the status code returned
                                          by Procedure when it completes execution on the target AP, or with
                                          EFI_TIMEOUT if the Procedure fails to complete within the optional
                                          timeout. The implementation will update this variable with
                                          EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2  Procedure,
  IN      UINTN              CpuIndex,
  IN OUT  VOID               *ProcArguments OPTIONAL,
  IN      MM_COMPLETION      *Token,
  IN      UINTN              TimeoutInMicroseconds,
  IN OUT  EFI_STATUS         *CpuStatus
  )
{
  //
  // Validate the target: must be a known CPU index, must not be the
  // currently-executing (BSP) CPU, must have a valid APIC ID, must have
  // checked into this SMI, and must not be pending hot-removal.
  //
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    //
    // Only report at error level in traditional sync mode, where every AP
    // is expected to be present during an SMI.
    //
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  //
  // A non-zero timeout is only legal when the timeout attribute is
  // advertised in mSmmMp.Attributes.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Take the target AP's Busy lock before publishing the work item; the AP
  // presumably releases it when the procedure completes, which makes the
  // second AcquireSpinLock below act as a completion wait.
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  if (Token != NULL) {
    //
    // Non-blocking mode: hand the caller a completion token (created in
    // the acquired state by CreateToken()).
    //
    *Token = (MM_COMPLETION) CreateToken ();
  }

  //
  // Publish the procedure, its argument, and the status/token plumbing for
  // the AP to pick up.
  //
  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    mSmmMpSyncData->CpuData[CpuIndex].Token = (SPIN_LOCK *)(*Token);
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    //
    // Per contract, the status reads EFI_NOT_READY until the procedure
    // actually completes on the AP.
    //
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  //
  // Kick the AP's Run semaphore to start execution.
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    //
    // Blocking mode: wait until the AP releases Busy (procedure done),
    // then drop the lock again ourselves.
    //
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}
1304\r
/**
  Worker function to execute a caller provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.


  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to Startup all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2             Procedure,
  IN       UINTN                         TimeoutInMicroseconds,
  IN OUT   VOID                          *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION                 *Token,
  IN OUT   EFI_STATUS                    *CPUStatus
  )
{
  UINTN               Index;
  UINTN               CpuCount;

  //
  // A non-zero timeout is only legal when the timeout attribute is
  // advertised.
  //
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  //
  // Pass 1: count present APs and verify each is dispatchable (not being
  // removed, Busy lock currently free).  The probe acquire/release leaves
  // the locks untouched on success.
  //
  CpuCount = 0;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      CpuCount ++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    *Token = (MM_COMPLETION) CreateToken ();
  }

  //
  // Make sure all BUSY should be acquired.
  //
  // Because former code already check mSmmMpSyncData->CpuData[***].Busy for each AP.
  // Here code always use AcquireSpinLock instead of AcquireSpinLockOrFail for not
  // block mode.
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Pass 2: publish the work item to every present AP; excluded CPUs get
  // EFI_NOT_STARTED in the status array per the PI spec.
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (Token != NULL) {
        mSmmMpSyncData->CpuData[Index].Token   = (SPIN_LOCK *)(*Token);
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status    = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }
    }
  }

  //
  // Release all APs to start executing the published procedure.
  //
  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}
1425\r
1426/**\r
1427 ISO C99 6.5.2.2 "Function calls", paragraph 9:\r
1428 If the function is defined with a type that is not compatible with\r
1429 the type (of the expression) pointed to by the expression that\r
1430 denotes the called function, the behavior is undefined.\r
1431\r
1432 So add below wrapper function to convert between EFI_AP_PROCEDURE\r
1433 and EFI_AP_PROCEDURE2.\r
1434\r
1435 Wrapper for Procedures.\r
1436\r
1437 @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.\r
1438\r
1439**/\r
1440EFI_STATUS\r
1441EFIAPI\r
1442ProcedureWrapper (\r
5ed4c46f 1443 IN VOID *Buffer\r
51dd408a
ED
1444 )\r
1445{\r
1446 PROCEDURE_WRAPPER *Wrapper;\r
1447\r
1448 Wrapper = Buffer;\r
1449 Wrapper->Procedure (Wrapper->ProcedureArgument);\r
1450\r
529a5a86
MK
1451 return EFI_SUCCESS;\r
1452}\r
1453\r
717fb604
JY
1454/**\r
1455 Schedule a procedure to run on the specified CPU in blocking mode.\r
1456\r
1457 @param[in] Procedure The address of the procedure to run\r
1458 @param[in] CpuIndex Target CPU Index\r
1459 @param[in, out] ProcArguments The parameter to pass to the procedure\r
1460\r
1461 @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
1462 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
1463 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
1464 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
1465 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
1466\r
1467**/\r
1468EFI_STATUS\r
1469EFIAPI\r
1470SmmBlockingStartupThisAp (\r
1471 IN EFI_AP_PROCEDURE Procedure,\r
1472 IN UINTN CpuIndex,\r
1473 IN OUT VOID *ProcArguments OPTIONAL\r
1474 )\r
1475{\r
51dd408a
ED
1476 PROCEDURE_WRAPPER Wrapper;\r
1477\r
1478 Wrapper.Procedure = Procedure;\r
1479 Wrapper.ProcedureArgument = ProcArguments;\r
1480\r
1481 //\r
1482 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.\r
1483 //\r
1484 return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);\r
717fb604
JY
1485}\r
1486\r
1487/**\r
1488 Schedule a procedure to run on the specified CPU.\r
1489\r
1490 @param Procedure The address of the procedure to run\r
1491 @param CpuIndex Target CPU Index\r
1492 @param ProcArguments The parameter to pass to the procedure\r
1493\r
1494 @retval EFI_INVALID_PARAMETER CpuNumber not valid\r
1495 @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP\r
1496 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM\r
1497 @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy\r
1498 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
1499\r
1500**/\r
1501EFI_STATUS\r
1502EFIAPI\r
1503SmmStartupThisAp (\r
1504 IN EFI_AP_PROCEDURE Procedure,\r
1505 IN UINTN CpuIndex,\r
1506 IN OUT VOID *ProcArguments OPTIONAL\r
1507 )\r
1508{\r
51dd408a
ED
1509 MM_COMPLETION Token;\r
1510\r
1511 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;\r
1512 gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;\r
1513\r
1514 //\r
1515 // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.\r
1516 //\r
1517 return InternalSmmStartupThisAp (\r
1518 ProcedureWrapper,\r
1519 CpuIndex,\r
1520 &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],\r
1521 FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,\r
1522 0,\r
1523 NULL\r
1524 );\r
717fb604
JY
1525}\r
1526\r
/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  //
  // Only active when the SMM debug feature PCD is set.
  //
  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    //
    // Load the debug registers from the 32-bit or 64-bit save state map,
    // depending on the save state LMA mode.
    //
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}
1557\r
/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  //
  // Mirror of CpuSmmDebugEntry: capture current DR6/DR7 back into the
  // save state map so they are restored on the next SMI entry.
  //
  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}
1587\r
529a5a86
MK
/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  Every CPU (BSP candidate or AP) funnels through this function on SMI.
  The flow is: save CR2, run the registered startup hook, validate the SMI,
  check in on the rendezvous counter, then either elect/act as BSP
  (BSPHandler) or follow the AP path (APHandler), and finally spin until
  the BSP signals that all CPUs may leave SMM.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS        Status;
  BOOLEAN           ValidSmi;
  BOOLEAN           IsBsp;
  BOOLEAN           BspInProgress;
  UINTN             Index;
  UINTN             Cr2;

  ASSERT(CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value,
  // when using on-demand paging for above 4G memory.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user register Startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            // First CPU to swap BspIndex from -1 to its own index wins.
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    //
    // At this point the Run semaphore must be fully consumed.
    //
    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}
1766\r
51dd408a
ED
/**
  Allocate buffer for SpinLock and Wrapper function buffer.

  Sets up the MM MP protocol support data: the first token chunk buffer
  (sized by PcdCpuSmmMpTokenCountPerChunk), the per-CPU procedure-wrapper
  array, and the token bookkeeping lists consumed by CreateToken().

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  UINTN           SpinLockSize;
  UINT32          TokenCountPerChunk;

  SpinLockSize = GetSpinLockProperties ();
  TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
  //
  // A zero chunk size would make CreateToken() divide work impossible;
  // treat it as a fatal configuration error even in RELEASE builds.
  //
  ASSERT (TokenCountPerChunk != 0);
  if (TokenCountPerChunk == 0) {
    DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
    CpuDeadLoop ();
  }
  DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));

  //
  // Pre-allocate the first chunk of completion tokens.
  //
  gSmmCpuPrivate->CurrentTokenBuf = AllocatePool (SpinLockSize * TokenCountPerChunk);
  ASSERT (gSmmCpuPrivate->CurrentTokenBuf != NULL);

  gSmmCpuPrivate->UsedTokenNum = 0;

  //
  // One PROCEDURE_WRAPPER slot per CPU, used by SmmStartupThisAp().
  //
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);
  InitializeListHead (&gSmmCpuPrivate->OldTokenBufList);
}
1799\r
1d648531
JF
/**
  Allocate buffer for all semaphores and spin locks.

  A single page-aligned block is carved into one cache-line-sized slot per
  semaphore (slot size = GetSpinLockProperties()): first the global
  semaphores, then the per-CPU semaphore arrays (one slot per CPU per
  semaphore).  The slot size is recorded in mSemaphoreSize for later
  per-CPU indexing in InitializeMpSyncData().

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize   = GetSpinLockProperties ();
  ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  //
  // Size each structure by its pointer-field count: every field gets one
  // SemaphoreSize slot (times ProcessorCount for the per-CPU set).
  //
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  //
  // Carve the global semaphores out of the front of the block, one slot
  // apiece.
  //
  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  //
  // Per-CPU semaphores follow: each array spans ProcessorCount slots.
  //
  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  //
  // Publish the module-level aliases used by the page-fault and code
  // access check paths.
  //
  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}
529a5a86
MK
1855\r
/**
  Initialize un-cacheable data.

  Lays out mSmmMpSyncData (header + CpuData array + CandidateBsp array in
  one allocation), resets the global rendezvous state, and points each
  CPU's Busy/Run/Present fields at its slot in the pre-allocated semaphore
  block from InitializeSmmCpuSemaphores().

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    //
    // Wire the global semaphores into the sync structure and reset them.
    //
    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    //
    // Each CPU's semaphores live mSemaphoreSize bytes apart in the
    // per-CPU arrays carved out by InitializeSmmCpuSemaphores().
    //
    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}
1906\r
/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow Stack size for each processor in SMM.

  @return The CR3 value of the SMM page table (also passed to each
          installed SMI handler).

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize,
  IN UINTN       ShadowStackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;
  CPUID_VERSION_INFO_EDX    RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  // Single allocation: header, then a CpuData entry and a CandidateBsp
  // flag per CPU.
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  // Each CPU gets its own stack slice (stack + shadow stack) and its own
  // GDT/TSS at GdtTableStepSize intervals; IDT and CR3 are shared.
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
1989\r
1990/**\r
1991\r
1992 Register the SMM Foundation entry point.\r
1993\r
1994 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance\r
1995 @param SmmEntryPoint SMM Foundation EntryPoint\r
1996\r
1997 @retval EFI_SUCCESS Successfully to register SMM foundation entry point\r
1998\r
1999**/\r
2000EFI_STATUS\r
2001EFIAPI\r
2002RegisterSmmEntry (\r
2003 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,\r
2004 IN EFI_SMM_ENTRY_POINT SmmEntryPoint\r
2005 )\r
2006{\r
2007 //\r
2008 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.\r
2009 //\r
2010 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;\r
2011 return EFI_SUCCESS;\r
2012}\r
51dd408a
ED
2013\r
2014/**\r
2015\r
2016 Register the SMM Foundation entry point.\r
2017\r
2018 @param[in] Procedure A pointer to the code stream to be run on the designated target AP\r
2019 of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2\r
2020 with the related definitions of\r
2021 EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.\r
2022 If caller may pass a value of NULL to deregister any existing\r
2023 startup procedure.\r
073f2ced 2024 @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is\r
51dd408a
ED
2025 run by the AP. It is an optional common mailbox between APs and\r
2026 the caller to share information\r
2027\r
2028 @retval EFI_SUCCESS The Procedure has been set successfully.\r
2029 @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.\r
2030\r
2031**/\r
2032EFI_STATUS\r
2033RegisterStartupProcedure (\r
073f2ced
SZ
2034 IN EFI_AP_PROCEDURE Procedure,\r
2035 IN OUT VOID *ProcedureArguments OPTIONAL\r
51dd408a
ED
2036 )\r
2037{\r
2038 if (Procedure == NULL && ProcedureArguments != NULL) {\r
2039 return EFI_INVALID_PARAMETER;\r
2040 }\r
2041 if (mSmmMpSyncData == NULL) {\r
2042 return EFI_NOT_READY;\r
2043 }\r
2044\r
2045 mSmmMpSyncData->StartupProcedure = Procedure;\r
2046 mSmmMpSyncData->StartupProcArgs = ProcedureArguments;\r
2047\r
2048 return EFI_SUCCESS;\r
2049}\r