/** @file
SMM MP service implementation

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
UINT64                                      gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
UINT64                                      gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
SMM_CPU_SYNC_MODE                           mCpuSmmSyncMode;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}

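//
// Usage sketch for the three primitives above (illustrative only, not code that
// is compiled into this module): the per-SMI arrival counter referenced by
// mSmmMpSyncData->Counter is driven entirely by these helpers. Each processor
// checks in by incrementing the counter, and the elected BSP later freezes it,
// so that a late arrival sees a zero "release" result and knows the rendezvous
// has already closed:
//
//   if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
//     // Counter was locked down at (UINT32)-1; BSP has ended synchronization.
//   }
//   ...
//   ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;  // BSP: freeze and count APs
//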
/**
  Wait for all APs to perform an atomic compare exchange operation to release the semaphore.

  @param   NumberOfAPs      AP number

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE   if all CPUs have checked in.
  @retval   FALSE  if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}


/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when this
  function returns, no AP will execute normal mode code before entering SMM, except
  for SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}

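//
// Configuration sketch (illustrative, not part of this module): the timeout
// consumed by StartSyncTimer()/IsSyncTimerTimeout() above is normally derived
// from the PcdCpuSmmApSyncTimeout value (in microseconds), so a platform that
// needs a longer arrival window can override it in its DSC, for example:
//
//   [PcdsFixedAtBuild]
//     gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout|0x20000
//
// The 0x20000 value here is only an example; pick a value per the constraints
// listed in the comment block above.
//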

/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  PROCESSOR_SMM_DESCRIPTOR       *Psd;
  UINT64                         *SmiMtrrs;
  MTRR_SETTINGS                  *BiosMtrr;

  Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
  SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;

  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
  MtrrSetAllMtrrs(BiosMtrr);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

}

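//
// Rendezvous sketch (illustrative summary of the two handlers above) for the
// SmmCpuFeaturesNeedConfigureMtrrs() case; each arrow is one Run semaphore
// post, BSP -> AP via CpuData[CpuIndex].Run and AP -> BSP via CpuData[BspIndex].Run:
//
//   BSP                                     AP
//   -----------------------------------     -----------------------------------
//   WaitForAllAPs (arrival)             <-  ReleaseSemaphore (Run[BspIndex])
//   ReleaseAllAPs ("backup MTRRs")      ->  WaitForSemaphore (Run[CpuIndex])
//   MtrrGetAllMtrrs + WaitForAllAPs     <-  MtrrGetAllMtrrs + ReleaseSemaphore
//   ReleaseAllAPs ("program MTRRs")     ->  WaitForSemaphore (Run[CpuIndex])
//   ReplaceOSMtrrs + WaitForAllAPs      <-  ReplaceOSMtrrs + ReleaseSemaphore
//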
/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable);
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}

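//
// Size sketch for the allocation in Gen4GPageTable() (illustrative): without the
// stack guard the function uses exactly 5 pages, 1 PDPT page whose first 4
// entries point at 4 page-directory pages, and each directory holds 512 2MB
// entries:
//
//   4 PD pages * 512 entries * 2MB = 4GB identity-mapped
//
// With PcdCpuSmmStackGuard enabled, one extra 4KB page-table page is added for
// every 2MB region covering the SMI stack array, so the guard page of each
// per-CPU stack can be mapped non-present at 4KB granularity.
//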
/**
  Set memory cacheability.

  @param    PageTable              PageTable Address
  @param    Address                Memory Address to change cacheability
  @param    Cacheability           Cacheability to set

**/
VOID
SetCacheability (
  IN      UINT64                    *PageTable,
  IN      UINTN                     Address,
  IN      UINT8                     Cacheability
  )
{
  UINTN   PTIndex;
  VOID    *NewPageTableAddress;
  UINT64  *NewPageTable;
  UINTN   Index;

  ASSERT ((Address & EFI_PAGE_MASK) == 0);

  if (sizeof (UINTN) == sizeof (UINT64)) {
    PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
    ASSERT (PageTable[PTIndex] & IA32_PG_P);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
  }

  PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  //
  // A perfect implementation should check the original cacheability with the
  // one being set, and break a 2M page entry into pieces only when they
  // disagree.
  //
  PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Allocate a page from SMRAM
    //
    NewPageTableAddress = AllocatePageTableMemory (1);
    ASSERT (NewPageTableAddress != NULL);

    NewPageTable = (UINT64 *)NewPageTableAddress;

    for (Index = 0; Index < 0x200; Index++) {
      NewPageTable[Index] = PageTable[PTIndex];
      if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
        NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
        NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
      }
      NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
    }

    PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
  }

  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
  PageTable[PTIndex] |= (UINT64)Cacheability;
}

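//
// Index arithmetic used above (illustrative): the linear address is decomposed
// with RShiftU64()/& 0x1ff exactly as in SetCacheability(). For example,
// Address = 0x7FE45000 splits as:
//
//   PML4 index = (Address >> 39) & 0x1ff = 0
//   PDPT index = (Address >> 30) & 0x1ff = 1
//   PD   index = (Address >> 21) & 0x1ff = 0x1FF
//   PT   index = (Address >> 12) & 0x1ff = 0x045
//
// so splitting the covering 2MB entry yields a 512-entry 4KB table in which
// entry 0x045 receives the requested cacheability bits.
//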
/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure
  @param[in]       BlockingMode             Startup AP in blocking mode or not

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL,
  IN      BOOLEAN                   BlockingMode
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  } else {
    if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
      DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
      return EFI_INVALID_PARAMETER;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
}

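//
// Usage sketch (illustrative only; CollectApTimestamp and ApIndex are
// hypothetical names, not part of this driver): an SMI handler running on the
// BSP can schedule work on another processor that has checked into this SMI,
// and SmmBlockingStartupThisAp() returns only after the AP has run the procedure:
//
//   VOID
//   EFIAPI
//   CollectApTimestamp (
//     IN OUT VOID  *Buffer
//     )
//   {
//     *(UINT64 *)Buffer = AsmReadTsc ();   // record the AP's time stamp counter
//   }
//
//   UINT64      Tsc;
//   EFI_STATUS  Status;
//
//   Status = SmmBlockingStartupThisAp (CollectApTimestamp, ApIndex, &Tsc);
//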
/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appreciated in runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS        Status;
  BOOLEAN           ValidSmi;
  BOOLEAN           IsBsp;
  BOOLEAN           BspInProgress;
  UINTN             Index;
  UINTN             Cr2;

  ASSERT(CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP is already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      MsrSemaphoreSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreMsr.Msr              = (SPIN_LOCK *)SemaphoreAddr;
  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
        ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}

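//
// Layout sketch of the semaphore block allocated above (illustrative; the
// numbers assume GetSpinLockProperties() returned a 64-byte cache line and a
// 4-CPU system):
//
//   GlobalSemaphoresSize = 6 slots * 64 bytes          = 0x180
//   CpuSemaphoresSize    = 3 slots * 4 CPUs * 64 bytes = 0x300
//   MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * 64 bytes
//
// Each semaphore or lock therefore occupies its own cache-line-sized slot,
// which keeps the heavily contended Counter/Run/Busy variables from sharing a
// cache line across processors.
//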
/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  MTRR_SETTINGS             *Mtrr;
  PROCESSOR_SMM_DESCRIPTOR  *Psd;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
    CopyMem (Psd, &gcPsd, sizeof (gcPsd));
    Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
    Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;

    //
    // Install SMI handler
    //
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)Psd->SmmGdtPtr,
      Psd->SmmGdtSize,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
  Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
  MtrrGetAllMtrrs (Mtrr);

  return Cr3;
}

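//
// Worked example for the gPhyMask computation above (illustrative): CPUID leaf
// 0x80000008 returns the physical address width in EAX[7:0]. On a CPU reporting
// 36 address bits:
//
//   gPhyMask = (1 << 36) - 1                = 0x0000000FFFFFFFFF
//   gPhyMask &= (1 << 48) - EFI_PAGE_SIZE  -> 0x0000000FFFFFF000
//
// i.e. the mask keeps the page-frame bits of a page table entry and drops both
// the low 12 attribute bits and any bits above the supported width.
//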
/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       Successfully registered the SMM Foundation entry point

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}