/** @file
SMM MP service implementation

Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (fixed MTRRs + variable MTRRs + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                gSmiMtrrs;
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES           mSmmCpuSemaphores;
UINTN                        mSemaphoreSize;
SPIN_LOCK                    *mPFLock = NULL;
SMM_CPU_SYNC_MODE            mCpuSmmSyncMode;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32  *Sem
  )
{
  UINT32  Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}
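
//
// Usage sketch (illustrative only, not additional driver logic): the global
// *mSmmMpSyncData->Counter is used as a counting semaphore for CPU arrival.
// Each CPU increments it on check-in, and the BSP freezes it once the
// rendezvous is over:
//
//   ReleaseSemaphore (mSmmMpSyncData->Counter);           // CPU checks in
//   ...
//   Count = LockdownSemaphore (mSmmMpSyncData->Counter);  // BSP parks Counter at -1
//
// After lockdown, ReleaseSemaphore() on the same semaphore returns 0 without
// modifying it (the Value + 1 != 0 guard), which is how a late CPU in
// SmiRendezvous() detects that it missed this SMI run.
//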

/**
  Wait for all APs to perform an atomic compare exchange operation that
  releases the semaphore.

  @param NumberOfAPs      Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN      UINTN  NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}
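
//
// Illustrative BSP-side rendezvous pattern built from these two helpers
// (a sketch of what BSPHandler() below actually does; DoBspWork() is a
// hypothetical placeholder):
//
//   ReleaseAllAPs ();          // signal every present AP's Run semaphore
//   DoBspWork ();
//   WaitForAllAPs (ApCount);   // block until each AP signals the BSP back
//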

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param Exceptions     CPU Arrival exception flags.

  @retval TRUE  if all CPUs have checked in.
  @retval FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}
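
//
// Illustrative call, mirroring how SmmWaitForApArrival() below uses it:
// tolerate CPUs that are blocked or have SMIs disabled, but keep waiting
// for every other CPU.
//
//   if (AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED |
//                                   ARRIVAL_EXCEPTION_SMI_DISABLED)) {
//     // every CPU that can arrive has arrived
//   }
//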

/**
  Check whether the OS has enabled LMCE (Local Machine Check Exception)
  in MSR_IA32_MCG_EXT_CTL.

  @retval TRUE   The OS has enabled LMCE.
  @retval FALSE  The OS has not enabled LMCE.

**/
BOOLEAN
IsLmceOsEnabled (
  VOID
  )
{
  MSR_IA32_MCG_CAP_REGISTER          McgCap;
  MSR_IA32_FEATURE_CONTROL_REGISTER  FeatureCtrl;
  MSR_IA32_MCG_EXT_CTL_REGISTER      McgExtCtrl;

  McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
  if (McgCap.Bits.MCG_LMCE_P == 0) {
    return FALSE;
  }

  FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
  if (FeatureCtrl.Bits.LmceOn == 0) {
    return FALSE;
  }

  McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
  return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
}

/**
  Return whether a local machine check exception has been signaled.

  MSR_IA32_MCG_STATUS.LMCE_S indicates (when set) that a local machine check
  exception was generated, i.e. the current machine-check event was delivered
  to only this logical processor.

  @retval TRUE   LMCE was signaled.
  @retval FALSE  LMCE was not signaled.

**/
BOOLEAN
IsLmceSignaled (
  VOID
  )
{
  MSR_IA32_MCG_STATUS_REGISTER  McgStatus;

  McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
  return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
}
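
//
// Illustrative combination, mirroring the check in SmmWaitForApArrival()
// below: when the OS owns LMCE and one has actually been signaled, the
// machine-check event is local to this processor, so the BSP may stop
// waiting for the other CPUs to arrive.
//
//   if (IsLmceOsEnabled () && IsLmceSignaled ()) {
//     // skip the AP-arrival timeout for this SMI run
//   }
//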

/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except for SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64   Timer;
  UINTN    Index;
  BOOLEAN  LmceEn;
  BOOLEAN  LmceSignal;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  LmceEn     = IsLmceOsEnabled ();
  LmceSignal = IsLmceSignaled ();

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that no APs are doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check the SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check the SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}
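
//
// The timeout itself comes from platform configuration. A minimal sketch,
// assuming the sync timer behind StartSyncTimer()/IsSyncTimerTimeout() is
// driven by the UefiCpuPkg PCD PcdCpuSmmApSyncTimeout (in microseconds)
// and the platform DSC overrides it:
//
//   [PcdsFixedAtBuild]
//     gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout|100000
//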

/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex    Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN  CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN              CpuIndex,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus ();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival ();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs (&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre-tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs ()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}
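
//
// Traditional-mode handshake at a glance -- a summary of BSPHandler() above
// and APHandler() below, not additional logic. "Run" is the per-CPU Run
// semaphore:
//
//   BSP                               each AP
//   SmmWaitForApArrival ()            ReleaseSemaphore (BSP.Run)  // arrival
//   WaitForAllAPs (ApCount)
//   ReleaseAllAPs ()                  WaitForSemaphore (AP.Run)   // backup MTRRs
//   MtrrGetAllMtrrs (&Mtrrs)          MtrrGetAllMtrrs (&Mtrrs)
//   WaitForAllAPs (ApCount)           ReleaseSemaphore (BSP.Run)
//   ReleaseAllAPs ()                  WaitForSemaphore (AP.Run)   // program MTRRs
//   ReplaceOSMtrrs ()                 ReplaceOSMtrrs ()
//   WaitForAllAPs (ApCount)           ReleaseSemaphore (BSP.Run)
//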

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN              CpuIndex,
  IN      BOOLEAN            ValidSmi,
  IN      SMM_CPU_SYNC_MODE  SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;

  //
  // Wait for the BSP to flag its presence, 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now wait for the BSP a 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs (&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs ()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs (&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create 4G page table in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         Page table address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN  Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}
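
//
// Size check for the allocation above (illustrative arithmetic, not code):
// the flat mapping needs 5 pages -- one PDPT page whose first 4 entries
// point at 4 page-directory pages, giving 4 * 512 PDEs of 2 MiB each, i.e.
// 4 * 512 * 2 MiB = 4 GiB. Only with PcdCpuSmmStackGuard does it grow by
// PagesNeeded page-table pages, one per 2 MiB window that must be split
// into 4 KiB pages so a stack guard page can be marked non-present.
//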

/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure
  @param[in]       BlockingMode             Startup AP in blocking mode or not

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL,
  IN      BOOLEAN           BlockingMode
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  } else {
    if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
      DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
      return EFI_INVALID_PARAMETER;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp (Procedure, CpuIndex, ProcArguments, TRUE);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE  Procedure,
  IN      UINTN             CpuIndex,
  IN OUT  VOID              *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp (Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
}
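
//
// Minimal usage sketch from an SMI handler running on the BSP. The procedure
// and context names are hypothetical, not part of this driver:
//
//   VOID
//   EFIAPI
//   MyApProcedure (
//     IN VOID  *Buffer
//     )
//   {
//     // runs on the target AP, inside SMM
//   }
//
//   Status = SmmStartupThisAp (MyApProcedure, CpuIndex, &MyContext);
//
// Whether SmmStartupThisAp() blocks until the AP finishes is governed by
// PcdCpuSmmBlockStartupThisAp; SmmBlockingStartupThisAp() always blocks.
//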

/**
  This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex    CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to the SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex    CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT (CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}
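
//
// Both helpers are gated on the PcdCpuSmmDebug feature flag. A sketch of the
// platform DSC override that would turn them on for DEBUG builds, assuming
// the flag lives in the UefiCpuPkg token space:
//
//   [PcdsFeatureFlag]
//     gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmDebug|TRUE
//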

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex    CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN  CpuIndex
  )
{
  EFI_STATUS  Status;
  BOOLEAN     ValidSmi;
  BOOLEAN     IsBsp;
  BOOLEAN     BspInProgress;
  UINTN       Index;
  UINTN       Cr2;

  ASSERT (CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi ();

  //
  // Determine if the BSP is already in progress. Note this must be checked after
  // ValidSmi because the BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {
      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}
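
//
// Decision flow of SmiRendezvous(), summarized from the code above (no
// additional behavior):
//
//   invalid SMI and no BSP in progress  -> leave immediately
//   Counter already locked down (-1)    -> missed this run; spin until
//                                          AllCpusInSync clears, then leave
//   BSP already inside SMM              -> APHandler()
//   otherwise                           -> elect BSP; the winner runs
//                                          BSPHandler(), losers APHandler()
//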

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  MsrSemaphoreSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

  SemaphoreSize  = GetSpinLockProperties ();
  ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreMsr.Msr              = (SPIN_LOCK *)SemaphoreAddr;
  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
    ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}
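
//
// Worked layout example (illustrative numbers): with SemaphoreSize = 64,
// say one cache line as returned by GetSpinLockProperties(), and
// ProcessorCount = 4, each global flag and each per-CPU Busy/Run/Present
// slot occupies its own 64-byte unit, avoiding false sharing between CPUs:
//
//   GlobalSemaphoresSize = 6 * 64      = 384 bytes
//   CpuSemaphoresSize    = 3 * 4 * 64  = 768 bytes
//   MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * 64
//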

/**
  Initialize uncacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID   *Stacks,
  IN UINTN  StackSize
  )
{
  UINT32  Cr3;
  UINTN   Index;
  UINT8   *GdtTssTables;
  UINTN   GdtTableStepSize;

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported!!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask  = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}
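
//
// Worked example for the gPhyMask computation above (illustrative values):
// CPUID leaf 0x80000008 returns the physical-address width in EAX bits 7:0,
// e.g. 36 bits. Then:
//
//   gPhyMask  = (1 << 36) - 1         = 0x0000000FFFFFFFFF
//   gPhyMask &= (1 << 48) - 0x1000      (clear bits above 47 and the low
//                                        12 page-offset bits)
//             -> 0x0000000FFFFFF000, a page-frame mask usable in PTEs
//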

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval EFI_SUCCESS            Successfully registered the SMM foundation entry point

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}