1/** @file\r
2SMM MP service implementation\r
3\r
4Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>\r
5Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>\r
6\r
7SPDX-License-Identifier: BSD-2-Clause-Patent\r
8\r
9**/\r
10\r
11#include "PiSmmCpuDxeSmm.h"\r
12\r
13//\r
14// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)\r
15//\r
16MTRR_SETTINGS gSmiMtrrs;\r
17UINT64 gPhyMask;\r
18SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
19UINTN mSmmMpSyncDataSize;\r
20SMM_CPU_SEMAPHORES mSmmCpuSemaphores;\r
21UINTN mSemaphoreSize;\r
22SPIN_LOCK *mPFLock = NULL;\r
23SMM_CPU_SYNC_MODE mCpuSmmSyncMode;\r
24BOOLEAN mMachineCheckSupported = FALSE;\r
25\r
26/**\r
27 Performs an atomic compare exchange operation to get a semaphore.\r
28 The compare exchange operation must be performed using\r
29 MP safe mechanisms.\r
30\r
31 @param Sem IN: 32-bit unsigned integer\r
32 OUT: original integer - 1\r
33 @return Original integer - 1\r
34\r
35**/\r
36UINT32\r
37WaitForSemaphore (\r
38 IN OUT volatile UINT32 *Sem\r
39 )\r
40{\r
41 UINT32 Value;\r
42\r
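  //
  // Spin while the semaphore count is zero; otherwise try to atomically
  // decrement it. The compare-exchange fails (and the loop retries) if another
  // processor modified *Sem between the read and the exchange.
  //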
43 do {\r
44 Value = *Sem;\r
45 } while (Value == 0 ||\r
46 InterlockedCompareExchange32 (\r
47 (UINT32*)Sem,\r
48 Value,\r
49 Value - 1\r
50 ) != Value);\r
51 return Value - 1;\r
52}\r
53\r
54\r
55/**\r
56 Performs an atomic compare exchange operation to release a semaphore.\r
57 The compare exchange operation must be performed using\r
58 MP safe mechanisms.\r
59\r
60 @param Sem IN: 32-bit unsigned integer\r
61 OUT: original integer + 1\r
62 @return Original integer + 1\r
63\r
64**/\r
65UINT32\r
66ReleaseSemaphore (\r
67 IN OUT volatile UINT32 *Sem\r
68 )\r
69{\r
70 UINT32 Value;\r
71\r
72 do {\r
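  //
  // Try to atomically increment the semaphore. If the semaphore has been
  // locked down to MAX_UINT32 by LockdownSemaphore(), Value + 1 wraps to 0,
  // the loop exits without modifying *Sem, and 0 is returned to the caller.
  //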
73 Value = *Sem;\r
74 } while (Value + 1 != 0 &&\r
75 InterlockedCompareExchange32 (\r
76 (UINT32*)Sem,\r
77 Value,\r
78 Value + 1\r
79 ) != Value);\r
80 return Value + 1;\r
81}\r
82\r
83/**\r
84 Performs an atomic compare exchange operation to lock a semaphore.\r
85 The compare exchange operation must be performed using\r
86 MP safe mechanisms.\r
87\r
88 @param Sem IN: 32-bit unsigned integer\r
89 OUT: -1\r
90 @return Original integer\r
91\r
92**/\r
93UINT32\r
94LockdownSemaphore (\r
95 IN OUT volatile UINT32 *Sem\r
96 )\r
97{\r
98 UINT32 Value;\r
99\r
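  //
  // Atomically replace the current count with MAX_UINT32 so that later
  // ReleaseSemaphore() calls become no-ops, and return the count observed at
  // the moment of lockdown.
  //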
100 do {\r
101 Value = *Sem;\r
102 } while (InterlockedCompareExchange32 (\r
103 (UINT32*)Sem,\r
104 Value, (UINT32)-1\r
105 ) != Value);\r
106 return Value;\r
107}\r
108\r
109/**\r
110 Wait for all APs to perform an atomic compare exchange operation to release the semaphore.\r
111\r
112 @param NumberOfAPs Number of APs to wait for\r
113\r
114**/\r
115VOID\r
116WaitForAllAPs (\r
117 IN UINTN NumberOfAPs\r
118 )\r
119{\r
120 UINTN BspIndex;\r
121\r
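  //
  // Each AP signals the BSP by incrementing the BSP's Run semaphore;
  // consume one count per expected AP here.
  //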
122 BspIndex = mSmmMpSyncData->BspIndex;\r
123 while (NumberOfAPs-- > 0) {\r
124 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
125 }\r
126}\r
127\r
128/**\r
129 Performs an atomic compare exchange operation to release the semaphore\r
130 for each AP.\r
131\r
132**/\r
133VOID\r
134ReleaseAllAPs (\r
135 VOID\r
136 )\r
137{\r
138 UINTN Index;\r
139 UINTN BspIndex;\r
140\r
141 BspIndex = mSmmMpSyncData->BspIndex;\r
142 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
143 if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {\r
144 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);\r
145 }\r
146 }\r
147}\r
148\r
149/**\r
150 Checks if all CPUs (with certain exceptions) have checked in for this SMI run\r
151\r
152 @param Exceptions CPU Arrival exception flags.\r
153\r
154 @retval TRUE if all CPUs have checked in.\r
155 @retval FALSE if at least one Normal AP hasn't checked in.\r
156\r
157**/\r
158BOOLEAN\r
159AllCpusInSmmWithExceptions (\r
160 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions\r
161 )\r
162{\r
163 UINTN Index;\r
164 SMM_CPU_DATA_BLOCK *CpuData;\r
165 EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
166\r
167 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
168\r
169 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {\r
170 return TRUE;\r
171 }\r
172\r
173 CpuData = mSmmMpSyncData->CpuData;\r
174 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;\r
175 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
176 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
177 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {\r
178 continue;\r
179 }\r
180 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {\r
181 continue;\r
182 }\r
183 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {\r
184 continue;\r
185 }\r
186 return FALSE;\r
187 }\r
188 }\r
189\r
190\r
191 return TRUE;\r
192}\r
193\r
194/**\r
195 Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.\r
196\r
197 @retval TRUE The OS has enabled LMCE.\r
198 @retval FALSE The OS has not enabled LMCE.\r
199\r
200**/\r
201BOOLEAN\r
202IsLmceOsEnabled (\r
203 VOID\r
204 )\r
205{\r
206 MSR_IA32_MCG_CAP_REGISTER McgCap;\r
207 MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;\r
208 MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;\r
209\r
210 McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);\r
211 if (McgCap.Bits.MCG_LMCE_P == 0) {\r
212 return FALSE;\r
213 }\r
214\r
215 FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);\r
216 if (FeatureCtrl.Bits.LmceOn == 0) {\r
217 return FALSE;\r
218 }\r
219\r
220 McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);\r
221 return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);\r
222}\r
223\r
224/**\r
225 Return whether a local machine check exception (LMCE) has been signaled.\r
226\r
227 MCG_STATUS.LMCE_S, when set, indicates that a local machine check exception was generated and that the current machine-check event was\r
228 delivered to only this logical processor.\r
229\r
230 @retval TRUE LMCE was signaled.\r
231 @retval FALSE LMCE was not signaled.\r
232\r
233**/\r
234BOOLEAN\r
235IsLmceSignaled (\r
236 VOID\r
237 )\r
238{\r
239 MSR_IA32_MCG_STATUS_REGISTER McgStatus;\r
240\r
241 McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);\r
242 return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);\r
243}\r
244\r
245/**\r
246 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before\r
247 entering SMM, except SMI-disabled APs.\r
248\r
249**/\r
250VOID\r
251SmmWaitForApArrival (\r
252 VOID\r
253 )\r
254{\r
255 UINT64 Timer;\r
256 UINTN Index;\r
257 BOOLEAN LmceEn;\r
258 BOOLEAN LmceSignal;\r
259\r
260 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
261\r
262 LmceEn = FALSE;\r
263 LmceSignal = FALSE;\r
264 if (mMachineCheckSupported) {\r
265 LmceEn = IsLmceOsEnabled ();\r
266 LmceSignal = IsLmceSignaled();\r
267 }\r
268\r
269 //\r
270 // Platform implementor should choose a timeout value appropriately:\r
271 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note\r
272 // the SMI Handlers must ALWAYS take into account the case that not all APs are available in an SMI run.\r
273 // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI\r
274 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will\r
275 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the\r
276 // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.\r
277 // - The timeout value must be longer than the longest possible I/O operation in the system.\r
278 //\r
279\r
280 //\r
281 // Sync with APs 1st timeout\r
282 //\r
283 for (Timer = StartSyncTimer ();\r
284 !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&\r
285 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
286 ) {\r
287 CpuPause ();\r
288 }\r
289\r
290 //\r
291 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,\r
292 // because:\r
293 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running\r
294 // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they\r
295 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode\r
296 // work while SMI handling is on-going.\r
297 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.\r
298 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state\r
299 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal\r
300 // mode work while SMI handling is on-going.\r
301 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:\r
302 // - In traditional flow, SMI disabling is discouraged.\r
303 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.\r
304 // In both cases, adding SMI-disabling checking code increases overhead.\r
305 //\r
306 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {\r
307 //\r
308 // Send SMI IPIs to bring outside processors in\r
309 //\r
310 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
311 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
312 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
313 }\r
314 }\r
315\r
316 //\r
317 // Sync with APs 2nd timeout.\r
318 //\r
319 for (Timer = StartSyncTimer ();\r
320 !IsSyncTimerTimeout (Timer) &&\r
321 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
322 ) {\r
323 CpuPause ();\r
324 }\r
325 }\r
326\r
327 return;\r
328}\r
329\r
330\r
331/**\r
332 Replace OS MTRRs with SMI MTRRs.\r
333\r
334 @param CpuIndex Processor Index\r
335\r
336**/\r
337VOID\r
338ReplaceOSMtrrs (\r
339 IN UINTN CpuIndex\r
340 )\r
341{\r
342 SmmCpuFeaturesDisableSmrr ();\r
343\r
344 //\r
345 // Replace all MTRR registers\r
346 //\r
347 MtrrSetAllMtrrs (&gSmiMtrrs);\r
348}\r
349\r
350/**\r
351 SMI handler for BSP.\r
352\r
353 @param CpuIndex BSP processor Index\r
354 @param SyncMode SMM MP sync mode\r
355\r
356**/\r
357VOID\r
358BSPHandler (\r
359 IN UINTN CpuIndex,\r
360 IN SMM_CPU_SYNC_MODE SyncMode\r
361 )\r
362{\r
363 UINTN Index;\r
364 MTRR_SETTINGS Mtrrs;\r
365 UINTN ApCount;\r
366 BOOLEAN ClearTopLevelSmiResult;\r
367 UINTN PresentCount;\r
368\r
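  //
  // Outline of the BSP flow below: in traditional sync mode (or whenever SMM
  // MTRRs must be programmed) the BSP first gathers all APs and walks them
  // through an MTRR save/replace handshake over the per-CPU Run semaphores.
  // It then runs the SMM Foundation entry point, gathers APs afterwards in
  // relaxed mode, and finally coordinates the synchronized exit, including
  // the MTRR restore and the per-CPU state reset.
  //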
369 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);\r
370 ApCount = 0;\r
371\r
372 //\r
373 // Flag BSP's presence\r
374 //\r
375 *mSmmMpSyncData->InsideSmm = TRUE;\r
376\r
377 //\r
378 // Initialize Debug Agent to start source level debug in BSP handler\r
379 //\r
380 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);\r
381\r
382 //\r
383 // Mark this processor's presence\r
384 //\r
385 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
386\r
387 //\r
388 // Clear platform top level SMI status bit before calling SMI handlers. If\r
389 // we cleared it after SMI handlers are run, we would miss the SMI that\r
390 // occurs after SMI handlers are done and before SMI status bit is cleared.\r
391 //\r
392 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();\r
393 ASSERT (ClearTopLevelSmiResult == TRUE);\r
394\r
395 //\r
396 // Set running processor index\r
397 //\r
398 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;\r
399\r
400 //\r
401 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.\r
402 //\r
403 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
404\r
405 //\r
406 // Wait for APs to arrive\r
407 //\r
408 SmmWaitForApArrival();\r
409\r
410 //\r
411 // Lock the counter down and retrieve the number of APs\r
412 //\r
413 *mSmmMpSyncData->AllCpusInSync = TRUE;\r
414 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
415\r
416 //\r
417 // Wait for all APs to get ready for programming MTRRs\r
418 //\r
419 WaitForAllAPs (ApCount);\r
420\r
421 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
422 //\r
423 // Signal all APs it's time for backup MTRRs\r
424 //\r
425 ReleaseAllAPs ();\r
426\r
427 //\r
428 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at\r
429 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set\r
430 // to a large enough value to avoid this situation.\r
431 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.\r
432 // We do the backup first and then set MTRR to avoid race condition for threads\r
433 // in the same core.\r
434 //\r
435 MtrrGetAllMtrrs(&Mtrrs);\r
436\r
437 //\r
438 // Wait for all APs to complete their MTRR saving\r
439 //\r
440 WaitForAllAPs (ApCount);\r
441\r
442 //\r
443 // Let all processors program SMM MTRRs together\r
444 //\r
445 ReleaseAllAPs ();\r
446\r
447 //\r
448 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at\r
449 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set\r
450 // to a large enough value to avoid this situation.\r
451 //\r
452 ReplaceOSMtrrs (CpuIndex);\r
453\r
454 //\r
455 // Wait for all APs to complete their MTRR programming\r
456 //\r
457 WaitForAllAPs (ApCount);\r
458 }\r
459 }\r
460\r
461 //\r
462 // The BUSY lock is initialized to Acquired state\r
463 //\r
464 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
465\r
466 //\r
467 // Perform the pre tasks\r
468 //\r
469 PerformPreTasks ();\r
470\r
471 //\r
472 // Invoke SMM Foundation EntryPoint with the processor information context.\r
473 //\r
474 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);\r
475\r
476 //\r
477 // Make sure all APs have completed their pending non-blocking tasks\r
478 //\r
479 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
480 if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {\r
481 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
482 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
483 }\r
484 }\r
485\r
486 //\r
487 // Perform the remaining tasks\r
488 //\r
489 PerformRemainingTasks ();\r
490\r
491 //\r
492 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and\r
493 // make those APs exit SMI synchronously. APs which arrive later will be excluded and\r
494 // will run through freely.\r
495 //\r
496 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {\r
497\r
498 //\r
499 // Lock the counter down and retrieve the number of APs\r
500 //\r
501 *mSmmMpSyncData->AllCpusInSync = TRUE;\r
502 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
503 //\r
504 // Make sure all APs have their Present flag set\r
505 //\r
506 while (TRUE) {\r
507 PresentCount = 0;\r
508 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
509 if (*(mSmmMpSyncData->CpuData[Index].Present)) {\r
510 PresentCount ++;\r
511 }\r
512 }\r
513 if (PresentCount > ApCount) {\r
514 break;\r
515 }\r
516 }\r
517 }\r
518\r
519 //\r
520 // Notify all APs to exit\r
521 //\r
522 *mSmmMpSyncData->InsideSmm = FALSE;\r
523 ReleaseAllAPs ();\r
524\r
525 //\r
526 // Wait for all APs to complete their pending tasks\r
527 //\r
528 WaitForAllAPs (ApCount);\r
529\r
530 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
531 //\r
532 // Signal APs to restore MTRRs\r
533 //\r
534 ReleaseAllAPs ();\r
535\r
536 //\r
537 // Restore OS MTRRs\r
538 //\r
539 SmmCpuFeaturesReenableSmrr ();\r
540 MtrrSetAllMtrrs(&Mtrrs);\r
541\r
542 //\r
543 // Wait for all APs to complete MTRR programming\r
544 //\r
545 WaitForAllAPs (ApCount);\r
546 }\r
547\r
548 //\r
549 // Stop source level debug in BSP handler, the code below will not be\r
550 // debugged.\r
551 //\r
552 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);\r
553\r
554 //\r
555 // Signal APs to Reset states/semaphore for this processor\r
556 //\r
557 ReleaseAllAPs ();\r
558\r
559 //\r
560 // Perform pending operations for hot-plug\r
561 //\r
562 SmmCpuUpdate ();\r
563\r
564 //\r
565 // Clear the Present flag of BSP\r
566 //\r
567 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
568\r
569 //\r
570 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but\r
571 // WaitForAllAps does not depend on the Present flag.\r
572 //\r
573 WaitForAllAPs (ApCount);\r
574\r
575 //\r
576 // Reset BspIndex to -1, meaning BSP has not been elected.\r
577 //\r
578 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
579 mSmmMpSyncData->BspIndex = (UINT32)-1;\r
580 }\r
581\r
582 //\r
583 // Allow APs to check in from this point on\r
584 //\r
585 *mSmmMpSyncData->Counter = 0;\r
586 *mSmmMpSyncData->AllCpusInSync = FALSE;\r
587}\r
588\r
589/**\r
590 SMI handler for AP.\r
591\r
592 @param CpuIndex AP processor Index.\r
593 @param ValidSmi Indicates whether the current SMI is a valid SMI.\r
594 @param SyncMode SMM MP sync mode.\r
595\r
596**/\r
597VOID\r
598APHandler (\r
599 IN UINTN CpuIndex,\r
600 IN BOOLEAN ValidSmi,\r
601 IN SMM_CPU_SYNC_MODE SyncMode\r
602 )\r
603{\r
604 UINT64 Timer;\r
605 UINTN BspIndex;\r
606 MTRR_SETTINGS Mtrrs;\r
607\r
608 //\r
609 // Timeout BSP\r
610 //\r
611 for (Timer = StartSyncTimer ();\r
612 !IsSyncTimerTimeout (Timer) &&\r
613 !(*mSmmMpSyncData->InsideSmm);\r
614 ) {\r
615 CpuPause ();\r
616 }\r
617\r
618 if (!(*mSmmMpSyncData->InsideSmm)) {\r
619 //\r
620 // BSP timeout in the first round\r
621 //\r
622 if (mSmmMpSyncData->BspIndex != -1) {\r
623 //\r
624 // BSP Index is known\r
625 //\r
626 BspIndex = mSmmMpSyncData->BspIndex;\r
627 ASSERT (CpuIndex != BspIndex);\r
628\r
629 //\r
630 // Send SMI IPI to bring BSP in\r
631 //\r
632 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);\r
633\r
634 //\r
635 // Now clock BSP for the 2nd time\r
636 //\r
637 for (Timer = StartSyncTimer ();\r
638 !IsSyncTimerTimeout (Timer) &&\r
639 !(*mSmmMpSyncData->InsideSmm);\r
640 ) {\r
641 CpuPause ();\r
642 }\r
643\r
644 if (!(*mSmmMpSyncData->InsideSmm)) {\r
645 //\r
646 // Give up since BSP is unable to enter SMM\r
647 // and signal the completion of this AP\r
648 WaitForSemaphore (mSmmMpSyncData->Counter);\r
649 return;\r
650 }\r
651 } else {\r
652 //\r
653 // Don't know BSP index. Give up without sending IPI to BSP.\r
654 //\r
655 WaitForSemaphore (mSmmMpSyncData->Counter);\r
656 return;\r
657 }\r
658 }\r
659\r
660 //\r
661 // BSP is available\r
662 //\r
663 BspIndex = mSmmMpSyncData->BspIndex;\r
664 ASSERT (CpuIndex != BspIndex);\r
665\r
666 //\r
667 // Mark this processor's presence\r
668 //\r
669 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
670\r
671 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
672 //\r
673 // Notify BSP of arrival at this point\r
674 //\r
675 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
676 }\r
677\r
678 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
679 //\r
680 // Wait for the signal from BSP to backup MTRRs\r
681 //\r
682 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
683\r
684 //\r
685 // Backup OS MTRRs\r
686 //\r
687 MtrrGetAllMtrrs(&Mtrrs);\r
688\r
689 //\r
690 // Signal BSP the completion of this AP\r
691 //\r
692 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
693\r
694 //\r
695 // Wait for BSP's signal to program MTRRs\r
696 //\r
697 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
698\r
699 //\r
700 // Replace OS MTRRs with SMI MTRRs\r
701 //\r
702 ReplaceOSMtrrs (CpuIndex);\r
703\r
704 //\r
705 // Signal BSP the completion of this AP\r
706 //\r
707 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
708 }\r
709\r
710 while (TRUE) {\r
711 //\r
712 // Wait for something to happen\r
713 //\r
714 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
715\r
716 //\r
717 // Check if BSP wants to exit SMM\r
718 //\r
719 if (!(*mSmmMpSyncData->InsideSmm)) {\r
720 break;\r
721 }\r
722\r
723 //\r
724 // BUSY should be acquired by SmmStartupThisAp()\r
725 //\r
726 ASSERT (\r
727 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)\r
728 );\r
729\r
730 //\r
731 // Invoke the scheduled procedure\r
732 //\r
733 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (\r
734 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter\r
735 );\r
736\r
737 //\r
738 // Release BUSY\r
739 //\r
740 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
741 }\r
742\r
743 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
744 //\r
745 // Notify BSP the readiness of this AP to program MTRRs\r
746 //\r
747 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
748\r
749 //\r
750 // Wait for the signal from BSP to program MTRRs\r
751 //\r
752 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
753\r
754 //\r
755 // Restore OS MTRRs\r
756 //\r
757 SmmCpuFeaturesReenableSmrr ();\r
758 MtrrSetAllMtrrs(&Mtrrs);\r
759 }\r
760\r
761 //\r
762 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor\r
763 //\r
764 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
765\r
766 //\r
767 // Wait for the signal from BSP to Reset states/semaphore for this processor\r
768 //\r
769 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
770\r
771 //\r
772 // Reset states/semaphore for this processor\r
773 //\r
774 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
775\r
776 //\r
777 // Notify BSP the readiness of this AP to exit SMM\r
778 //\r
779 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
780\r
781}\r
782\r
783/**\r
784 Create 4G PageTable in SMRAM.\r
785\r
786 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE\r
787 @return PageTable Address\r
788\r
789**/\r
790UINT32\r
791Gen4GPageTable (\r
792 IN BOOLEAN Is32BitPageTable\r
793 )\r
794{\r
795 VOID *PageTable;\r
796 UINTN Index;\r
797 UINT64 *Pte;\r
798 UINTN PagesNeeded;\r
799 UINTN Low2MBoundary;\r
800 UINTN High2MBoundary;\r
801 UINTN Pages;\r
802 UINTN GuardPage;\r
803 UINT64 *Pdpte;\r
804 UINTN PageIndex;\r
805 UINTN PageAddress;\r
806\r
807 Low2MBoundary = 0;\r
808 High2MBoundary = 0;\r
809 PagesNeeded = 0;\r
810 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
811 //\r
812 // Add one more page for known good stack, then find the lower 2MB aligned address.\r
813 //\r
814 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);\r
815 //\r
816 // Add two more pages for known good stack and stack guard page,\r
817 // then find the lower 2MB aligned address.\r
818 //\r
819 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);\r
820 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;\r
821 }\r
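  //
  // 5 pages hold the identity map itself: one page of PDPTEs plus four pages
  // of PDEs covering 0..4GB with 2MB pages. The extra PagesNeeded pages hold
  // 4KB page tables for every 2MB region that contains a stack guard page.
  //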
822 //\r
823 // Allocate the page table\r
824 //\r
825 PageTable = AllocatePageTableMemory (5 + PagesNeeded);\r
826 ASSERT (PageTable != NULL);\r
827\r
828 PageTable = (VOID *)((UINTN)PageTable);\r
829 Pte = (UINT64*)PageTable;\r
830\r
831 //\r
832 // Zero out all page table entries first\r
833 //\r
834 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));\r
835\r
836 //\r
837 // Set Page Directory Pointers\r
838 //\r
839 for (Index = 0; Index < 4; Index++) {\r
840 Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |\r
841 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
842 }\r
843 Pte += EFI_PAGE_SIZE / sizeof (*Pte);\r
844\r
845 //\r
846 // Fill in Page Directory Entries\r
847 //\r
848 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {\r
849 Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
850 }\r
851\r
852 Pdpte = (UINT64*)PageTable;\r
853 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
854 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);\r
855 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;\r
856 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {\r
857 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
858 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
859 //\r
860 // Fill in Page Table Entries\r
861 //\r
862 Pte = (UINT64*)Pages;\r
863 PageAddress = PageIndex;\r
864 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
865 if (PageAddress == GuardPage) {\r
866 //\r
867 // Mark the guard page as non-present\r
868 //\r
869 Pte[Index] = PageAddress | mAddressEncMask;\r
870 GuardPage += mSmmStackSize;\r
871 if (GuardPage > mSmmStackArrayEnd) {\r
872 GuardPage = 0;\r
873 }\r
874 } else {\r
875 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
876 }\r
877 PageAddress+= EFI_PAGE_SIZE;\r
878 }\r
879 Pages += EFI_PAGE_SIZE;\r
880 }\r
881 }\r
882\r
883 if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {\r
884 Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
885 if ((Pte[0] & IA32_PG_PS) == 0) {\r
886 // 4K-page entries are already mapped. Just hide the first one anyway.\r
887 Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));\r
888 Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0\r
889 } else {\r
890 // Create 4K-page entries\r
891 Pages = (UINTN)AllocatePageTableMemory (1);\r
892 ASSERT (Pages != 0);\r
893\r
894 Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);\r
895\r
896 Pte = (UINT64*)Pages;\r
897 PageAddress = 0;\r
898 Pte[0] = PageAddress | mAddressEncMask; // Hide page 0; the remaining entries below are left present\r
899 for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
900 PageAddress += EFI_PAGE_SIZE;\r
901 Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;\r
902 }\r
903 }\r
904 }\r
905\r
906 return (UINT32)(UINTN)PageTable;\r
907}\r
908\r
909/**\r
910 Schedule a procedure to run on the specified CPU.\r
911\r
912 @param[in] Procedure The address of the procedure to run\r
913 @param[in] CpuIndex Target CPU Index\r
914 @param[in, out] ProcArguments The parameter to pass to the procedure\r
915 @param[in] BlockingMode Startup AP in blocking mode or not\r
916\r
917 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
918 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP\r
919 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
920 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
921 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
922\r
923**/\r
924EFI_STATUS\r
925InternalSmmStartupThisAp (\r
926 IN EFI_AP_PROCEDURE Procedure,\r
927 IN UINTN CpuIndex,\r
928 IN OUT VOID *ProcArguments OPTIONAL,\r
929 IN BOOLEAN BlockingMode\r
930 )\r
931{\r
932 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {\r
933 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
934 return EFI_INVALID_PARAMETER;\r
935 }\r
936 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
937 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));\r
938 return EFI_INVALID_PARAMETER;\r
939 }\r
940 if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {\r
941 return EFI_INVALID_PARAMETER;\r
942 }\r
943 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
944 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {\r
945 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));\r
946 }\r
947 return EFI_INVALID_PARAMETER;\r
948 }\r
949 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {\r
950 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
951 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));\r
952 }\r
953 return EFI_INVALID_PARAMETER;\r
954 }\r
955\r
956 if (BlockingMode) {\r
957 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
958 } else {\r
959 if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {\r
960 DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));\r
961 return EFI_INVALID_PARAMETER;\r
962 }\r
963 }\r
964\r
965 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;\r
966 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;\r
967 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
968\r
969 if (BlockingMode) {\r
970 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
971 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
972 }\r
973 return EFI_SUCCESS;\r
974}\r
975\r
976/**\r
977 Schedule a procedure to run on the specified CPU in blocking mode.\r
978\r
979 @param[in] Procedure The address of the procedure to run\r
980 @param[in] CpuIndex Target CPU Index\r
981 @param[in, out] ProcArguments The parameter to pass to the procedure\r
982\r
983 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
984 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP\r
985 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
986 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
987 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
988\r
989**/\r
990EFI_STATUS\r
991EFIAPI\r
992SmmBlockingStartupThisAp (\r
993 IN EFI_AP_PROCEDURE Procedure,\r
994 IN UINTN CpuIndex,\r
995 IN OUT VOID *ProcArguments OPTIONAL\r
996 )\r
997{\r
998 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);\r
999}\r
1000\r
1001/**\r
1002 Schedule a procedure to run on the specified CPU.\r
1003\r
1004 @param Procedure The address of the procedure to run\r
1005 @param CpuIndex Target CPU Index\r
1006 @param ProcArguments The parameter to pass to the procedure\r
1007\r
1008 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
1009 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP\r
1010 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
1011 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
1012 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
1013\r
1014**/\r
1015EFI_STATUS\r
1016EFIAPI\r
1017SmmStartupThisAp (\r
1018 IN EFI_AP_PROCEDURE Procedure,\r
1019 IN UINTN CpuIndex,\r
1020 IN OUT VOID *ProcArguments OPTIONAL\r
1021 )\r
1022{\r
1023 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));\r
1024}\r
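//
// Usage sketch (illustrative only, not part of this driver): an SMI handler
// running on the BSP that asks every other checked-in processor to flush its
// TLB. "FlushTlbProc", "Index" and "Status" are hypothetical names; real
// callers should examine the return status, since an AP that has not checked
// in for this SMI run is rejected with EFI_INVALID_PARAMETER.
//
//   VOID
//   EFIAPI
//   FlushTlbProc (
//     IN OUT VOID  *Buffer
//     )
//   {
//     CpuFlushTlb ();   // CpuLib
//   }
//
//   ...
//   for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
//     if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
//       Status = SmmBlockingStartupThisAp (FlushTlbProc, Index, NULL);
//     }
//   }
//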
1025\r
1026/**\r
1027 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.\r
1028 They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.\r
1029\r
1030 NOTE: It might not be appreciated at runtime since it might\r
1031 conflict with OS debugging facilities. Turn them off in RELEASE.\r
1032\r
1033 @param CpuIndex CPU Index\r
1034\r
1035**/\r
1036VOID\r
1037EFIAPI\r
1038CpuSmmDebugEntry (\r
1039 IN UINTN CpuIndex\r
1040 )\r
1041{\r
1042 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
1043\r
1044 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
1045 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
1046 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
1047 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1048 AsmWriteDr6 (CpuSaveState->x86._DR6);\r
1049 AsmWriteDr7 (CpuSaveState->x86._DR7);\r
1050 } else {\r
1051 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);\r
1052 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);\r
1053 }\r
1054 }\r
1055}\r
1056\r
1057/**\r
1058 This function restores DR6 & DR7 to SMM save state.\r
1059\r
1060 NOTE: It might not be appreciated at runtime since it might\r
1061 conflict with OS debugging facilities. Turn them off in RELEASE.\r
1062\r
1063 @param CpuIndex CPU Index\r
1064\r
1065**/\r
1066VOID\r
1067EFIAPI\r
1068CpuSmmDebugExit (\r
1069 IN UINTN CpuIndex\r
1070 )\r
1071{\r
1072 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
1073\r
1074 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
1075 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
1076 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
1077 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1078 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
1079 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();\r
1080 } else {\r
1081 CpuSaveState->x64._DR7 = AsmReadDr7 ();\r
1082 CpuSaveState->x64._DR6 = AsmReadDr6 ();\r
1083 }\r
1084 }\r
1085}\r
1086\r
1087/**\r
1088 C function for SMI entry, each processor comes here upon SMI trigger.\r
1089\r
1090 @param CpuIndex CPU Index\r
1091\r
1092**/\r
1093VOID\r
1094EFIAPI\r
1095SmiRendezvous (\r
1096 IN UINTN CpuIndex\r
1097 )\r
1098{\r
1099 EFI_STATUS Status;\r
1100 BOOLEAN ValidSmi;\r
1101 BOOLEAN IsBsp;\r
1102 BOOLEAN BspInProgress;\r
1103 UINTN Index;\r
1104 UINTN Cr2;\r
1105\r
1106 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
1107\r
1108 //\r
1109 // Save Cr2 because a Page Fault exception in SMM may override its value\r
1110 // when on-demand paging is used for memory above 4G.\r
1111 //\r
1112 Cr2 = 0;\r
1113 SaveCr2 (&Cr2);\r
1114\r
1115 //\r
1116 // Perform CPU specific entry hooks\r
1117 //\r
1118 SmmCpuFeaturesRendezvousEntry (CpuIndex);\r
1119\r
1120 //\r
1121 // Determine if this is a valid SMI\r
1122 //\r
1123 ValidSmi = PlatformValidSmi();\r
1124\r
1125 //\r
1126 // Determine if the BSP is already in progress. Note this must be checked after\r
1127 // ValidSmi because BSP may clear a valid SMI source after checking in.\r
1128 //\r
1129 BspInProgress = *mSmmMpSyncData->InsideSmm;\r
1130\r
1131 if (!BspInProgress && !ValidSmi) {\r
1132 //\r
1133 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not\r
1134 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI\r
1135 // status had been cleared by BSP and an existing SMI run has almost ended. (Note\r
1136 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there\r
1137 // is nothing we need to do.\r
1138 //\r
1139 goto Exit;\r
1140 } else {\r
1141 //\r
1142 // Signal presence of this processor\r
1143 //\r
1144 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {\r
1145 //\r
1146 // BSP has already ended the synchronization, so QUIT!!!\r
1147 //\r
1148\r
1149 //\r
1150 // Wait for BSP's signal to finish SMI\r
1151 //\r
1152 while (*mSmmMpSyncData->AllCpusInSync) {\r
1153 CpuPause ();\r
1154 }\r
1155 goto Exit;\r
1156 } else {\r
1157\r
1158 //\r
1159 // The BUSY lock is initialized to Released state.\r
1160 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.\r
1161 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately\r
1162 // after AP's present flag is detected.\r
1163 //\r
1164 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
1165 }\r
1166\r
1167 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1168 ActivateSmmProfile (CpuIndex);\r
1169 }\r
1170\r
1171 if (BspInProgress) {\r
1172 //\r
1173 // BSP has been elected. Follow AP path, regardless of ValidSmi flag\r
1174 // as BSP may have cleared the SMI status\r
1175 //\r
1176 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1177 } else {\r
1178 //\r
1179 // We have a valid SMI\r
1180 //\r
1181\r
1182 //\r
1183 // Elect BSP\r
1184 //\r
1185 IsBsp = FALSE;\r
1186 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1187 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {\r
1188 //\r
1189 // Call platform hook to do BSP election\r
1190 //\r
1191 Status = PlatformSmmBspElection (&IsBsp);\r
1192 if (EFI_SUCCESS == Status) {\r
1193 //\r
1194 // Platform hook determined the BSP successfully\r
1195 //\r
1196 if (IsBsp) {\r
1197 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;\r
1198 }\r
1199 } else {\r
1200 //\r
1201 // Platform hook failed to determine the BSP; use the default election method\r
1202 //\r
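          //
          // The first processor whose exchange succeeds replaces the initial
          // value of -1 with its own index and becomes the BSP; exchanges by
          // later processors fail and leave BspIndex unchanged.
          //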
1203 InterlockedCompareExchange32 (\r
1204 (UINT32*)&mSmmMpSyncData->BspIndex,\r
1205 (UINT32)-1,\r
1206 (UINT32)CpuIndex\r
1207 );\r
1208 }\r
1209 }\r
1210 }\r
1211\r
1212 //\r
1213 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP\r
1214 //\r
1215 if (mSmmMpSyncData->BspIndex == CpuIndex) {\r
1216\r
1217 //\r
1218 // Clear last request for SwitchBsp.\r
1219 //\r
1220 if (mSmmMpSyncData->SwitchBsp) {\r
1221 mSmmMpSyncData->SwitchBsp = FALSE;\r
1222 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1223 mSmmMpSyncData->CandidateBsp[Index] = FALSE;\r
1224 }\r
1225 }\r
1226\r
1227 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1228 SmmProfileRecordSmiNum ();\r
1229 }\r
1230\r
1231 //\r
1232 // BSP Handler is always called with a ValidSmi == TRUE\r
1233 //\r
1234 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);\r
1235 } else {\r
1236 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1237 }\r
1238 }\r
1239\r
1240 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);\r
1241\r
1242 //\r
1243 // Wait for BSP's signal to exit SMI\r
1244 //\r
1245 while (*mSmmMpSyncData->AllCpusInSync) {\r
1246 CpuPause ();\r
1247 }\r
1248 }\r
1249\r
1250Exit:\r
1251 SmmCpuFeaturesRendezvousExit (CpuIndex);\r
1252\r
1253 //\r
1254 // Restore Cr2\r
1255 //\r
1256 RestoreCr2 (Cr2);\r
1257}\r
1258\r
1259/**\r
1260 Allocate buffer for all semaphores and spin locks.\r
1261\r
1262**/\r
1263VOID\r
1264InitializeSmmCpuSemaphores (\r
1265 VOID\r
1266 )\r
1267{\r
1268 UINTN ProcessorCount;\r
1269 UINTN TotalSize;\r
1270 UINTN GlobalSemaphoresSize;\r
1271 UINTN CpuSemaphoresSize;\r
1272 UINTN SemaphoreSize;\r
1273 UINTN Pages;\r
1274 UINTN *SemaphoreBlock;\r
1275 UINTN SemaphoreAddr;\r
1276\r
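  //
  // GetSpinLockProperties() reports the platform's optimal spin-lock size and
  // alignment (typically a cache line). Every semaphore and lock below is
  // placed in its own SemaphoreSize-sized slot so that processors spinning on
  // different semaphores do not contend for the same cache line.
  //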
1277 SemaphoreSize = GetSpinLockProperties ();\r
1278 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1279 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
1280 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;\r
1281 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;\r
1282 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
1283 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
1284 Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
1285 SemaphoreBlock = AllocatePages (Pages);\r
1286 ASSERT (SemaphoreBlock != NULL);\r
1287 ZeroMem (SemaphoreBlock, TotalSize);\r
1288\r
1289 SemaphoreAddr = (UINTN)SemaphoreBlock;\r
1290 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;\r
1291 SemaphoreAddr += SemaphoreSize;\r
1292 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;\r
1293 SemaphoreAddr += SemaphoreSize;\r
1294 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;\r
1295 SemaphoreAddr += SemaphoreSize;\r
1296 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;\r
1297 SemaphoreAddr += SemaphoreSize;\r
1298 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
1299 = (SPIN_LOCK *)SemaphoreAddr;\r
1300 SemaphoreAddr += SemaphoreSize;\r
1301\r
1302 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
1303 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
1304 SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
1305 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;\r
1306 SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
1307 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;\r
1308\r
1309 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
1310 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
1311\r
1312 mSemaphoreSize = SemaphoreSize;\r
1313}\r
1314\r
1315/**\r
1316 Initialize un-cacheable data.\r
1317\r
1318**/\r
1319VOID\r
1320EFIAPI\r
1321InitializeMpSyncData (\r
1322 VOID\r
1323 )\r
1324{\r
1325 UINTN CpuIndex;\r
1326\r
1327 if (mSmmMpSyncData != NULL) {\r
1328 //\r
1329 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one\r
1330 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.\r
1331 //\r
1332 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);\r
1333 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
1334 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
1335 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1336 //\r
1337 // Enable BSP election by setting BspIndex to -1\r
1338 //\r
1339 mSmmMpSyncData->BspIndex = (UINT32)-1;\r
1340 }\r
1341 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;\r
1342\r
1343 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;\r
1344 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;\r
1345 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;\r
1346 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&\r
1347 mSmmMpSyncData->AllCpusInSync != NULL);\r
1348 *mSmmMpSyncData->Counter = 0;\r
1349 *mSmmMpSyncData->InsideSmm = FALSE;\r
1350 *mSmmMpSyncData->AllCpusInSync = FALSE;\r
1351\r
1352 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {\r
1353 mSmmMpSyncData->CpuData[CpuIndex].Busy =\r
1354 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);\r
1355 mSmmMpSyncData->CpuData[CpuIndex].Run =\r
1356 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);\r
1357 mSmmMpSyncData->CpuData[CpuIndex].Present =\r
1358 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);\r
1359 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;\r
1360 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;\r
1361 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
1362 }\r
1363 }\r
1364}\r
1365\r
1366/**\r
1367 Initialize global data for MP synchronization.\r
1368\r
1369 @param Stacks Base address of SMI stack buffer for all processors.\r
1370 @param StackSize Stack size for each processor in SMM.\r
1371 @param ShadowStackSize Shadow Stack size for each processor in SMM.\r
1372\r
1373**/\r
1374UINT32\r
1375InitializeMpServiceData (\r
1376 IN VOID *Stacks,\r
1377 IN UINTN StackSize,\r
1378 IN UINTN ShadowStackSize\r
1379 )\r
1380{\r
1381 UINT32 Cr3;\r
1382 UINTN Index;\r
1383 UINT8 *GdtTssTables;\r
1384 UINTN GdtTableStepSize;\r
1385 CPUID_VERSION_INFO_EDX RegEdx;\r
1386\r
1387 //\r
1388 // Determine if this CPU supports machine check\r
1389 //\r
1390 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);\r
1391 mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);\r
1392\r
1393 //\r
1394 // Allocate memory for all locks and semaphores\r
1395 //\r
1396 InitializeSmmCpuSemaphores ();\r
1397\r
1398 //\r
1399 // Initialize mSmmMpSyncData\r
1400 //\r
1401 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
1402 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1403 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
1404 ASSERT (mSmmMpSyncData != NULL);\r
1405 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);\r
1406 InitializeMpSyncData ();\r
1407\r
1408 //\r
1409 // Initialize physical address mask\r
1410 // NOTE: Physical memory above virtual address limit is not supported !!!\r
1411 //\r
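  // CPUID leaf 80000008h returns the number of physical address bits in
  // EAX[7:0]; the mask built from it is then clipped to 48 bits and
  // page-aligned so it can be applied directly to page table entries.
  //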
1412 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);\r
1413 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;\r
1414 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;\r
1415\r
1416 //\r
1417 // Create page tables\r
1418 //\r
1419 Cr3 = SmmInitPageTable ();\r
1420\r
1421 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
1422\r
1423 //\r
1424 // Install SMI handler for each CPU\r
1425 //\r
1426 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1427 InstallSmiHandler (\r
1428 Index,\r
1429 (UINT32)mCpuHotPlugData.SmBase[Index],\r
1430 (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),\r
1431 StackSize,\r
1432 (UINTN)(GdtTssTables + GdtTableStepSize * Index),\r
1433 gcSmiGdtr.Limit + 1,\r
1434 gcSmiIdtr.Base,\r
1435 gcSmiIdtr.Limit + 1,\r
1436 Cr3\r
1437 );\r
1438 }\r
1439\r
1440 //\r
1441 // Record current MTRR settings\r
1442 //\r
1443 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));\r
1444 MtrrGetAllMtrrs (&gSmiMtrrs);\r
1445\r
1446 return Cr3;\r
1447}\r
1448\r
1449/**\r
1450\r
1451 Register the SMM Foundation entry point.\r
1452\r
1453 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance\r
1454 @param SmmEntryPoint SMM Foundation EntryPoint\r
1455\r
1456 @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully\r
1457\r
1458**/\r
1459EFI_STATUS\r
1460EFIAPI\r
1461RegisterSmmEntry (\r
1462 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,\r
1463 IN EFI_SMM_ENTRY_POINT SmmEntryPoint\r
1464 )\r
1465{\r
1466 //\r
1467 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.\r
1468 //\r
1469 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;\r
1470 return EFI_SUCCESS;\r
1471}\r