/** @file
SMM MP service implementation

Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (fixed MTRRs + variable MTRRs + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                               gSmiMtrrs;
UINT64                                      gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
SMM_CPU_SYNC_MODE                           mCpuSmmSyncMode;
BOOLEAN                                     mMachineCheckSupported = FALSE;

/**
  Performs an atomic compare exchange operation to get a semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer - 1
  @return Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release a semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: original integer + 1
  @return Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock a semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param Sem        IN:  32-bit unsigned integer
                    OUT: -1
  @return Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}
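
//
// Note on the interplay of the three primitives above (illustrative summary of
// the logic as written, not additional behavior): LockdownSemaphore() parks
// the counter at (UINT32)-1. Because ReleaseSemaphore() refuses to increment
// when Value + 1 == 0, any late CPU releasing a locked-down counter gets 0
// back without modifying it:
//
//   *Counter = 5;
//   LockdownSemaphore (Counter);   // returns 5, *Counter is now 0xFFFFFFFF
//   ReleaseSemaphore (Counter);    // returns 0, *Counter is unchanged
//
// SmiRendezvous() relies on this: a 0 return from ReleaseSemaphore() tells a
// late-arriving CPU that the BSP has already closed the rendezvous window.
//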

/**
  Waits for all APs to perform an atomic compare exchange operation that
  releases the BSP's Run semaphore.

  @param NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release the Run semaphore
  of each present AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;

  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}
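
//
// Illustrative sketch of the Run-semaphore handshake these helpers implement
// (derived from the code above, not additional driver logic). Each CPU has a
// Run counter; the BSP blocks on its own counter and each AP blocks on its own:
//
//   BSP                              AP (CpuIndex)
//   ReleaseAllAPs ();                WaitForSemaphore (CpuData[CpuIndex].Run);
//     // bumps every AP's Run          // wakes when its Run > 0
//   WaitForAllAPs (ApCount);         ReleaseSemaphore (CpuData[BspIndex].Run);
//     // consumes ApCount signals      // signals completion to the BSP
//
// BSPHandler() and APHandler() below pair these calls to step all processors
// through MTRR save/program/restore in lockstep.
//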

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param Exceptions     CPU Arrival exception flags.

  @retval TRUE  if all CPUs have checked in.
  @retval FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Checks whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE   The OS has enabled LMCE.
  @retval FALSE  The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}
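
//
// The check above gates on three levels, in order (a summary of the MSR reads
// performed, shown for reference):
//
//   MSR_IA32_MCG_CAP.MCG_LMCE_P      - the CPU is LMCE-capable
//   MSR_IA32_FEATURE_CONTROL.LmceOn  - firmware has opted in
//   MSR_IA32_MCG_EXT_CTL.LMCE_EN     - the OS has opted in
//
// Only when all three bits are set does SmmWaitForApArrival() treat a signaled
// local machine check as a reason to stop waiting for the remaining APs in the
// first timeout round.
//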

/**
  Return whether a local machine check exception (LMCE) has been signaled.

  MCG_STATUS.LMCE_S indicates (when set) that a local machine check exception
  was generated, i.e. the current machine-check event was delivered to only
  this logical processor.

  @retval TRUE    LMCE was signaled.
  @retval FALSE   LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}

/**
  Given a timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except for SMI-disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;
  BOOLEAN                           LmceEn;
  BOOLEAN                           LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = FALSE;
  LmceSignal = FALSE;
  if (mMachineCheckSupported) {
    LmceEn     = IsLmceOsEnabled ();
    LmceSignal = IsLmceSignaled ();
  }

  //
  // The platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the case that not all APs are available in an SMI run.
  // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed APs may have just come out of the delayed state. Blocked APs may have just been brought out of the blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state,
  //    they enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that no APs do normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of sending the SMI IPI, a (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPIs to SMI-disabled APs, because:
  //    - In the traditional flow, SMI disabling is discouraged.
  //    - In the relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
         ) {
      CpuPause ();
    }
  }

  return;
}

/**
  Replace OS MTRRs with SMI MTRRs.

  @param CpuIndex   Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  Check whether the task has been finished by all APs.

  @param BlockMode   Whether to check in blocking mode or non-blocking mode.

  @retval TRUE       The task has been finished by all APs.
  @retval FALSE      The task has not been finished by all APs.

**/
BOOLEAN
WaitForAllAPsNotBusy (
  IN BOOLEAN                        BlockMode
  )
{
  UINTN                             Index;

  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    //
    // Ignore the BSP and APs which have not checked in to SMM.
    //
    if (!IsPresentAp (Index)) {
      continue;
    }

    if (BlockMode) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    } else {
      if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      } else {
        return FALSE;
      }
    }
  }

  return TRUE;
}
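
//
// Illustrative note (no new behavior): the Busy spin lock doubles as a
// completion flag. An AP holds its Busy lock while running a scheduled
// procedure, so "acquire then immediately release" acts as a completion probe:
//
//   AcquireSpinLock (Busy);               // blocks until the AP is done
//   ReleaseSpinLock (Busy);
//
//   if (AcquireSpinLockOrFail (Busy)) {   // non-blocking probe
//     ReleaseSpinLock (Busy);             // AP is done
//   } else {
//     ...                                 // AP is still running
//   }
//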

/**
  Check whether it is a present AP.

  @param CpuIndex      The AP index which calls this function.

  @retval TRUE   It's a present AP.
  @retval FALSE  This is not an AP or it is not present.

**/
BOOLEAN
IsPresentAp (
  IN UINTN        CpuIndex
  )
{
  return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
          *(mSmmMpSyncData->CpuData[CpuIndex].Present));
}

/**
  Check whether the current call executes on a single AP or on all APs.

  Compare the Tokens used by two different present APs to determine whether we
  are inside a StartAllAps call. An AP is considered valid based on its
  Present flag.

  @retval TRUE       In a StartAllAps call.
  @retval FALSE      Not in a StartAllAps call.

**/
BOOLEAN
InStartAllApsCall (
  VOID
  )
{
  UINTN      ApIndex;
  UINTN      ApIndex2;

  for (ApIndex = mMaxNumberOfCpus; ApIndex-- > 0;) {
    if (IsPresentAp (ApIndex) && (mSmmMpSyncData->CpuData[ApIndex].Token != NULL)) {
      for (ApIndex2 = ApIndex; ApIndex2-- > 0;) {
        if (IsPresentAp (ApIndex2) && (mSmmMpSyncData->CpuData[ApIndex2].Token != NULL)) {
          return mSmmMpSyncData->CpuData[ApIndex2].Token == mSmmMpSyncData->CpuData[ApIndex].Token;
        }
      }
    }
  }

  return FALSE;
}

/**
  Clean up the status flags used while executing the procedure.

  @param CpuIndex      The AP index which calls this function.

**/
VOID
ReleaseToken (
  IN UINTN                  CpuIndex
  )
{
  UINTN                             Index;
  BOOLEAN                           Released;

  if (InStartAllApsCall ()) {
    //
    // In Start All APs mode, make sure all APs have finished their tasks.
    //
    if (WaitForAllAPsNotBusy (FALSE)) {
      //
      // Clean up the flags updated in the function call.
      //
      Released = FALSE;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        //
        // Only APs that are in SMM need to be cleaned up.
        //
        if (mSmmMpSyncData->CpuData[Index].Present && mSmmMpSyncData->CpuData[Index].Token != NULL) {
          if (!Released) {
            ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Token);
            Released = TRUE;
          }
          mSmmMpSyncData->CpuData[Index].Token = NULL;
        }
      }
    }
  } else {
    //
    // In single AP mode.
    //
    if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
      ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Token);
      mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
    }
  }
}

/**
  Free the tokens in the maintained list.

**/
VOID
FreeTokens (
  VOID
  )
{
  LIST_ENTRY                 *Link;
  PROCEDURE_TOKEN            *ProcToken;

  while (!IsListEmpty (&gSmmCpuPrivate->TokenList)) {
    Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    RemoveEntryList (&ProcToken->Link);

    FreePool ((VOID *)ProcToken->ProcedureToken);
    FreePool (ProcToken);
  }
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear the platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before the SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or we need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs that it's time to back up MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRRs to avoid a race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  WaitForAllAPsNotBusy (TRUE);

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler; the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to reset states/semaphores for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of the BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Clean the token buffer.
  //
  FreeTokens ();

  //
  // Reset BspIndex to -1, meaning the BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;
  EFI_STATUS                        ProcedureStatus;

  //
  // Wait for the BSP to enter SMM, with timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send an SMI IPI to bring the BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for the BSP a 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since the BSP is unable to enter SMM,
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know the BSP index. Give up without sending an IPI to the BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify the BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from the BSP to back up MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Back up OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal the BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal the BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if the BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
                        (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
                        );
    if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
      *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
    }

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

    ReleaseToken (CpuIndex);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify the BSP of the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from the BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify the BSP of the readiness of this AP to reset states/semaphores for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from the BSP to reset states/semaphores for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphores for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify the BSP of the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create a 4G page table in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for the known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for the known good stack and the stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out the first page, which holds the Page Directory Pointer entries
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  Pdpte = (UINT64*)PageTable;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
    Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
    if ((Pte[0] & IA32_PG_PS) == 0) {
      // 4K-page entries are already mapped. Just hide the first one anyway.
      Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
    } else {
      // Create 4K-page entries
      Pages = (UINTN)AllocatePageTableMemory (1);
      ASSERT (Pages != 0);

      Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);

      Pte = (UINT64*)Pages;
      PageAddress = 0;
      Pte[0] = PageAddress | mAddressEncMask; // Hide page 0; the remaining entries are mapped present below
      for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        PageAddress += EFI_PAGE_SIZE;
        Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      }
    }
  }

  return (UINT32)(UINTN)PageTable;
}
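
//
// Size sketch for the layout built above (derived from the code, shown for
// reference): one PDPTE page plus four page-directory pages cover the full
// 32-bit address space with 2MB pages:
//
//   4 PDPTEs x 512 PDEs x 2MB = 4GB
//
// Hence the base allocation of 5 pages; PagesNeeded adds one 4KB-granular
// page table per 2MB window that contains SMM stack guard pages.
//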

/**
  Checks whether the input token is the token currently in use.

  @param[in] Token      This parameter describes the token that was passed into DispatchProcedure or
                        BroadcastProcedure.

  @retval TRUE          The input token is the token currently in use.
  @retval FALSE         The input token is not the token currently in use.
**/
BOOLEAN
IsTokenInUse (
  IN SPIN_LOCK           *Token
  )
{
  LIST_ENTRY        *Link;
  PROCEDURE_TOKEN   *ProcToken;

  if (Token == NULL) {
    return FALSE;
  }

  Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
  while (!IsNull (&gSmmCpuPrivate->TokenList, Link)) {
    ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);

    if (ProcToken->ProcedureToken == Token) {
      return TRUE;
    }

    Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
  }

  return FALSE;
}

/**
  Create a token and save it to the maintained list.

  @return The spin lock used as the token.

**/
SPIN_LOCK *
CreateToken (
  VOID
  )
{
  PROCEDURE_TOKEN    *ProcToken;
  SPIN_LOCK          *CpuToken;
  UINTN              SpinLockSize;

  SpinLockSize = GetSpinLockProperties ();
  CpuToken = AllocatePool (SpinLockSize);
  ASSERT (CpuToken != NULL);
  InitializeSpinLock (CpuToken);
  AcquireSpinLock (CpuToken);

  ProcToken = AllocatePool (sizeof (PROCEDURE_TOKEN));
  ASSERT (ProcToken != NULL);
  ProcToken->Signature = PROCEDURE_TOKEN_SIGNATURE;
  ProcToken->ProcedureToken = CpuToken;

  InsertTailList (&gSmmCpuPrivate->TokenList, &ProcToken->Link);

  return CpuToken;
}

/**
  Checks the status of the specified AP.

  This function checks whether the specified AP has finished the task assigned
  by StartupThisAP(), and whether the timeout has expired.

  @param[in] Token      This parameter describes the token that was passed into DispatchProcedure or
                        BroadcastProcedure.

  @retval EFI_SUCCESS           Specified AP has finished the task assigned by StartupThisAP().
  @retval EFI_NOT_READY         Specified AP has not finished its task and the timeout has not expired.
**/
EFI_STATUS
IsApReady (
  IN SPIN_LOCK  *Token
  )
{
  if (AcquireSpinLockOrFail (Token)) {
    ReleaseSpinLock (Token);
    return EFI_SUCCESS;
  }

  return EFI_NOT_READY;
}
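
//
// Token lifecycle sketch (illustrative, matching CreateToken() and IsApReady()
// above): a token is a spin lock created in the Acquired state and released by
// ReleaseToken() only once the target AP(s) finish the procedure, so polling
// it answers "done yet?":
//
//   SPIN_LOCK  *Token;
//
//   Token = CreateToken ();               // acquired on creation
//   ...                                   // AP runs; ReleaseToken() frees it
//   if (IsApReady (Token) == EFI_SUCCESS) {
//     // the procedure has completed
//   }
//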

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in,out]   ProcArguments            The parameter to pass to the procedure
  @param[in]       Token                    This is an optional parameter that allows the caller to execute the
                                            procedure in a blocking or non-blocking fashion. If it is NULL the
                                            call is blocking, and the call will not return until the AP has
                                            completed the procedure. If the token is not NULL, the call will
                                            return immediately. The caller can check whether the procedure has
                                            completed with CheckOnProcedure or WaitForProcedure.
  @param[in]       TimeoutInMicroseconds    Indicates the time limit in microseconds for the APs to finish
                                            execution of Procedure, either for blocking or non-blocking mode.
                                            Zero means infinity. If the timeout expires before all APs return
                                            from Procedure, then Procedure on the failed APs is terminated. If
                                            the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
                                            If the timeout expires in non-blocking mode, the timeout can be
                                            determined through CheckOnProcedure or WaitForProcedure.
                                            Note that timeout support is optional. Whether an implementation
                                            supports this feature can be determined via the Attributes data
                                            member.
  @param[in,out]   CpuStatus                This optional pointer may be used to get the status code returned
                                            by Procedure when it completes execution on the target AP, or with
                                            EFI_TIMEOUT if the Procedure fails to complete within the optional
                                            timeout. The implementation will update this variable with
                                            EFI_NOT_READY prior to starting Procedure on the target AP.

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE2              Procedure,
  IN      UINTN                          CpuIndex,
  IN OUT  VOID                           *ProcArguments OPTIONAL,
  IN      MM_COMPLETION                  *Token,
  IN      UINTN                          TimeoutInMicroseconds,
  IN OUT  EFI_STATUS                     *CpuStatus
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG ((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG ((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG ((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  } else {
    if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
      DEBUG ((DEBUG_ERROR, "Can't acquire mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
      return EFI_NOT_READY;
    }

    *Token = (MM_COMPLETION) CreateToken ();
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  if (Token != NULL) {
    mSmmMpSyncData->CpuData[CpuIndex].Token = (SPIN_LOCK *)(*Token);
  }
  mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
  if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
    *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
  }

  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (Token == NULL) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  return EFI_SUCCESS;
}
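
//
// Usage sketch for the non-blocking path above (hypothetical caller, shown
// for illustration only; error handling omitted):
//
//   MM_COMPLETION  Token;
//   EFI_STATUS     ApStatus;
//
//   Status = InternalSmmStartupThisAp (Proc, Index, Arg, &Token, 0, &ApStatus);
//   // returns immediately; Token wraps a spin lock held until Proc finishes
//   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
//     CpuPause ();
//   }
//   // ApStatus now holds the value Proc returned on the target AP
//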

/**
  Worker function to execute a caller-provided function on all enabled APs.

  @param[in]     Procedure               A pointer to the function to be run on
                                         enabled APs of the system.
  @param[in]     TimeoutInMicroseconds   Indicates the time limit in microseconds for
                                         APs to return from Procedure, either for
                                         blocking or non-blocking mode.
  @param[in,out] ProcedureArguments      The parameter passed into Procedure for
                                         all APs.
  @param[in,out] Token                   This is an optional parameter that allows the caller to execute the
                                         procedure in a blocking or non-blocking fashion. If it is NULL the
                                         call is blocking, and the call will not return until the AP has
                                         completed the procedure. If the token is not NULL, the call will
                                         return immediately. The caller can check whether the procedure has
                                         completed with CheckOnProcedure or WaitForProcedure.
  @param[in,out] CPUStatus               This optional pointer may be used to get the status code returned
                                         by Procedure when it completes execution on the target AP, or with
                                         EFI_TIMEOUT if the Procedure fails to complete within the optional
                                         timeout. The implementation will update this variable with
                                         EFI_NOT_READY prior to starting Procedure on the target AP.


  @retval EFI_SUCCESS             In blocking mode, all APs have finished before
                                  the timeout expired.
  @retval EFI_SUCCESS             In non-blocking mode, the function has been dispatched
                                  to all enabled APs.
  @retval others                  Failed to start up all APs.

**/
EFI_STATUS
InternalSmmStartupAllAPs (
  IN       EFI_AP_PROCEDURE2             Procedure,
  IN       UINTN                         TimeoutInMicroseconds,
  IN OUT   VOID                          *ProcedureArguments OPTIONAL,
  IN OUT   MM_COMPLETION                 *Token,
  IN OUT   EFI_STATUS                    *CPUStatus
  )
{
  UINTN               Index;
  UINTN               CpuCount;

  if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
    return EFI_INVALID_PARAMETER;
  }
  if (Procedure == NULL) {
    return EFI_INVALID_PARAMETER;
  }

  CpuCount = 0;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      CpuCount++;

      if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
        return EFI_INVALID_PARAMETER;
      }

      if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
        return EFI_NOT_READY;
      }
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }
  if (CpuCount == 0) {
    return EFI_NOT_STARTED;
  }

  if (Token != NULL) {
    *Token = (MM_COMPLETION) CreateToken ();
  }

  //
  // Make sure every BUSY lock is acquired.
  //
  // Because the code above already checked mSmmMpSyncData->CpuData[***].Busy for each
  // present AP, AcquireSpinLock is always used here instead of AcquireSpinLockOrFail,
  // even in non-blocking mode.
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (IsPresentAp (Index)) {
      mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
      mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
      if (Token != NULL) {
        mSmmMpSyncData->CpuData[Index].Token = (SPIN_LOCK *)(*Token);
      }
      if (CPUStatus != NULL) {
        mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
        if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
          *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
        }
      }
    } else {
      //
      // PI spec requirement:
      // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
      //
      if (CPUStatus != NULL) {
        CPUStatus[Index] = EFI_NOT_STARTED;
      }
    }
  }

  ReleaseAllAPs ();

  if (Token == NULL) {
    //
    // Make sure all APs have completed their tasks.
    //
    WaitForAllAPsNotBusy (TRUE);
  }

  return EFI_SUCCESS;
}

/**
  ISO C99 6.5.2.2 "Function calls", paragraph 9:
  If the function is defined with a type that is not compatible with
  the type (of the expression) pointed to by the expression that
  denotes the called function, the behavior is undefined.

  The wrapper function below therefore converts between EFI_AP_PROCEDURE
  and EFI_AP_PROCEDURE2.

  Wrapper for Procedures.

  @param[in] Buffer     Pointer to PROCEDURE_WRAPPER buffer.

  @retval EFI_SUCCESS   The wrapped procedure has been run.

**/
EFI_STATUS
EFIAPI
ProcedureWrapper (
  IN VOID  *Buffer
  )
{
  PROCEDURE_WRAPPER  *Wrapper;

  Wrapper = Buffer;
  Wrapper->Procedure (Wrapper->ProcedureArgument);

  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  PROCEDURE_WRAPPER  Wrapper;

  Wrapper.Procedure = Procedure;
  Wrapper.ProcedureArgument = ProcArguments;

  //
  // Use the wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuNumber not valid
  @retval EFI_INVALID_PARAMETER    CpuNumber specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuNumber is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  MM_COMPLETION  Token;

  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
  gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;

  //
  // Use the wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
  //
  return InternalSmmStartupThisAp (
           ProcedureWrapper,
           CpuIndex,
           &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
           FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &Token,
           0,
           NULL
           );
}
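
//
// Caller's-eye sketch for the routine above (illustrative only; names are
// hypothetical, and callers typically reach this service through the SMM
// services table rather than calling it directly):
//
//   VOID
//   EFIAPI
//   MyApWork (
//     IN OUT VOID  *Buffer    // hypothetical argument block
//     );
//
//   Status = SmmStartupThisAp (MyApWork, CpuIndex, &MyBuffer);
//
// Whether the call blocks is governed by PcdCpuSmmBlockStartupThisAp: when
// TRUE, a NULL token forces the blocking path; otherwise a throw-away token
// lets the call return as soon as the procedure is scheduled.
//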

/**
  This function sets DR6 & DR7 according to the SMM save state, before running
  SMM C code. This is useful when you want to enable hardware breakpoints in
  SMM without entering SMM mode first.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn it off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to the SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn it off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry; each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS                     Status;
  BOOLEAN                        ValidSmi;
  BOOLEAN                        IsBsp;
  BOOLEAN                        BspInProgress;
  UINTN                          Index;
  UINTN                          Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because a Page Fault exception in SMM may override its value,
  // when on-demand paging is used for memory above 4G.
  //
  Cr2 = 0;
  SaveCr2 (&Cr2);

  //
  // Call the user-registered startup function first.
  //
  if (mSmmMpSyncData->StartupProcedure != NULL) {
    mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
  }

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if the BSP is already in progress. Note this must be checked after
  // ValidSmi because the BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means that when we sampled the ValidSmi flag, SMI status had not
    // been cleared by the BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by the BSP and an existing SMI run has almost ended. (Note
    // we sampled the ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases,
    // there is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for the BSP's signal to finish the SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for the BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after the AP's Present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // The BSP has been elected. Follow the AP path, regardless of the ValidSmi flag,
      // as the BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (Status == EFI_SUCCESS) {
            //
            // Platform hook determined the BSP successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook failed to determine the BSP; use the default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear the last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSPHandler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for the BSP's signal to exit the SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);

  //
  // Restore Cr2
  //
  RestoreCr2 (Cr2);
}

/**
  Allocate a buffer for the wrapper functions and initialize the token list.

**/
VOID
InitializeDataForMmMp (
  VOID
  )
{
  gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
  ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);

  InitializeListHead (&gSmmCpuPrivate->TokenList);
}

/**
  Allocate a buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize  = GetSpinLockProperties ();
  ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
  DEBUG ((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG ((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;

  mSemaphoreSize = SemaphoreSize;
}
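
//
// Resulting layout (illustrative, derived from the assignments above; each
// field is padded to SemaphoreSize bytes as returned by
// GetSpinLockProperties (), typically one cache line):
//
//   +-- global ------------------------+-- per CPU (x ProcessorCount) -------+
//   | Counter | InsideSmm | ... locks  | Busy[0..N] | Run[0..N] | Present[0..N]
//   +----------------------------------+-------------------------------------+
//
// Keeping each semaphore on its own cache line avoids false sharing between
// CPUs that spin on different flags.
//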

/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks             Base address of SMI stack buffer for all processors.
  @param StackSize          Stack size for each processor in SMM.
  @param ShadowStackSize    Shadow stack size for each processor in SMM.

  @return The CR3 value (page table base address) created for SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize,
  IN UINTN       ShadowStackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;
  CPUID_VERSION_INFO_EDX    RegEdx;

  //
  // Determine if this CPU supports machine check
  //
  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
  mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize the physical address mask
  // NOTE: Physical memory above the virtual address limit is not supported!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask  = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install an SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval EFI_SUCCESS               Successfully registered the SMM Foundation entry point

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record the SMM Foundation EntryPoint; it is invoked later on the SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}

/**

  Register a startup procedure that every CPU invokes on SMI entry, before
  SMI processing begins.

  @param[in]      Procedure            A pointer to the code stream to be run on the designated target AP
                                       of the system. Type EFI_AP_PROCEDURE is defined in the PI Specification,
                                       Volume 2, with the related definitions of
                                       EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
                                       The caller may pass a value of NULL to deregister any existing
                                       startup procedure.
  @param[in,out]  ProcedureArguments   Allows the caller to pass a list of parameters to the code that is
                                       run by the AP. It is an optional common mailbox between APs and
                                       the caller to share information.

  @retval EFI_SUCCESS                  The Procedure has been set successfully.
  @retval EFI_INVALID_PARAMETER        The Procedure is NULL but ProcedureArguments is not NULL.
  @retval EFI_NOT_READY                The SMM MP sync data has not been initialized yet.

**/
EFI_STATUS
RegisterStartupProcedure (
  IN     EFI_AP_PROCEDURE  Procedure,
  IN OUT VOID              *ProcedureArguments OPTIONAL
  )
{
  if (Procedure == NULL && ProcedureArguments != NULL) {
    return EFI_INVALID_PARAMETER;
  }
  if (mSmmMpSyncData == NULL) {
    return EFI_NOT_READY;
  }

  mSmmMpSyncData->StartupProcedure = Procedure;
  mSmmMpSyncData->StartupProcArgs  = ProcedureArguments;

  return EFI_SUCCESS;
}
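
//
// Usage sketch (hypothetical handler; the names are illustrative only):
//
//   VOID
//   EFIAPI
//   MyPerCpuSmiPrologue (
//     IN OUT VOID  *Context
//     );
//
//   Status = RegisterStartupProcedure (MyPerCpuSmiPrologue, &MyContext);
//   ...
//   Status = RegisterStartupProcedure (NULL, NULL);   // deregister
//
// Once registered, SmiRendezvous() invokes the procedure on every CPU at the
// top of each SMI, before BSP election and CPU synchronization.
//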