/** @file
SMM MP service implementation

Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>

This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
MTRR_SETTINGS                               gSmiMtrrs;
UINT64                                      gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;
SMM_CPU_SYNC_MODE                           mCpuSmmSyncMode;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}

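//
// Illustrative pairing (editorial sketch, not part of the original driver):
// the BSP posts one unit per AP and each AP consumes exactly one unit, so the
// pair behaves like a counting semaphore built on
// InterlockedCompareExchange32():
//
//   ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);   // producer side
//   WaitForSemaphore (mSmmMpSyncData->CpuData[Index].Run);   // consumer side
//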

/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}

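//
// Editorial note: LockdownSemaphore() parks the counter at (UINT32)-1.
// Because ReleaseSemaphore() refuses to increment a counter whose value is
// (UINT32)-1 (the "Value + 1 != 0" guard), a CPU that checks in after the
// lockdown gets 0 back from ReleaseSemaphore() and knows it missed this SMI
// run; see the Counter handling in SmiRendezvous() below.
//
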
/**
  Waits for all APs to perform an atomic compare exchange operation that
  releases the BSP's Run semaphore.

  @param   NumberOfAPs      Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run.

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs have checked in.
  @retval   FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

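//
// Illustrative call (editorial; it mirrors SmmWaitForApArrival() below):
// treat APs that are blocked or have SMIs disabled as acceptable absentees
// while waiting for arrival:
//
//   AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
//
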
/**
  Given a timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except for SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system.
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL not-present APs,
  // because:
  // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that no APs are doing normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
  // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
         ) {
      CpuPause ();
    }
  }

  return;
}


/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex     Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  MtrrSetAllMtrrs (&gSmiMtrrs);
}

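//
// Editorial note: gSmiMtrrs is captured once, at the end of
// InitializeMpServiceData() below, via MtrrGetAllMtrrs (&gSmiMtrrs); every
// subsequent SMI run replays that snapshot through ReplaceOSMtrrs().
//
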
/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAps does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

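//
// Editorial summary of the BSP/AP handshake above and in APHandler() below,
// for the traditional sync mode with MTRR configuration (each arrow is one
// Run-semaphore post):
//
//   BSP                               AP
//   SmmWaitForApArrival()             checks in, Present = TRUE
//   WaitForAllAPs()           <--     ReleaseSemaphore (Run[BspIndex])
//   ReleaseAllAPs()           -->     backup MTRRs (MtrrGetAllMtrrs)
//   WaitForAllAPs()           <--     done
//   ReleaseAllAPs()           -->     ReplaceOSMtrrs()
//   WaitForAllAPs()           <--     done
//   ... SmmCoreEntry / scheduled procedures ...
//   InsideSmm = FALSE, ReleaseAllAPs() --> restore MTRRs, reset state, exit
//
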
/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
}

/**
  Create 4G PageTable in SMRAM.

  @param[in]      Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
                 (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress | mAddressEncMask;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}

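//
// Editorial note on the layout above: the first of the 5 base pages holds the
// 4 PDPTEs; the next 4 pages hold 4 x 512 page directory entries, each mapping
// a 2MB page, so the identity map covers 4 x 512 x 2MB = 4GB. The optional
// PagesNeeded tail converts one 2MB mapping per 2MB window of the stack range
// into a 4KB-granular page table so individual guard pages can be marked
// non-present.
//
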
/**
  Schedule a procedure to run on the specified CPU.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure
  @param[in]       BlockingMode             Startup AP in blocking mode or not

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
InternalSmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL,
  IN      BOOLEAN                   BlockingMode
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    return EFI_INVALID_PARAMETER;
  }
  if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
    DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
    return EFI_INVALID_PARAMETER;
  }
  if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
    if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
      DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }
  if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
    if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
      DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
    }
    return EFI_INVALID_PARAMETER;
  }

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  } else {
    if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
      DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
      return EFI_INVALID_PARAMETER;
    }
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (BlockingMode) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}

/**
  Schedule a procedure to run on the specified CPU in blocking mode.

  @param[in]       Procedure                The address of the procedure to run
  @param[in]       CpuIndex                 Target CPU Index
  @param[in, out]  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmBlockingStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp (Procedure, CpuIndex, ProcArguments, TRUE);
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  return InternalSmmStartupThisAp (Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
}

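//
// Illustrative caller sketch (editorial; MyApProcedure is a hypothetical
// name). A BSP-side SMI handler could schedule work on one AP like this:
//
//   VOID EFIAPI MyApProcedure (IN OUT VOID *Buffer);
//   ...
//   Status = SmmBlockingStartupThisAp (MyApProcedure, CpuIndex, NULL);
//
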
/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    ASSERT(CpuIndex < mMaxNumberOfCpus);
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

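//
// Editorial note (assumption, not stated in this file): CpuSmmDebugEntry()
// and CpuSmmDebugExit() are expected to be invoked from the per-CPU assembly
// SMI entry stub, bracketing the call to SmiRendezvous() below, so the
// DR6/DR7 values in the save state survive the C portion of the SMI.
//
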
/**
  C function for SMI entry; each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS        Status;
  BOOLEAN           ValidSmi;
  BOOLEAN           IsBsp;
  BOOLEAN           BspInProgress;
  UINTN             Index;
  UINTN             Cr2;

  ASSERT(CpuIndex < mMaxNumberOfCpus);

  //
  // Save Cr2 because Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}

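//
// Editorial summary: SmiRendezvous() resolves each CPU into one of three
// outcomes: (1) spurious SMI with no BSP in progress, leave immediately;
// (2) checked in after LockdownSemaphore() parked the Counter, spin until
// AllCpusInSync clears, then leave; (3) checked in on time, run BSPHandler()
// or APHandler() and exit under the BSP's control.
//
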
/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN                      ProcessorCount;
  UINTN                      TotalSize;
  UINTN                      GlobalSemaphoresSize;
  UINTN                      CpuSemaphoresSize;
  UINTN                      MsrSemaphoreSize;
  UINTN                      SemaphoreSize;
  UINTN                      Pages;
  UINTN                      *SemaphoreBlock;
  UINTN                      SemaphoreAddr;

  SemaphoreSize        = GetSpinLockProperties ();
  ProcessorCount       = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize    = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  MsrSemaphoreSize     = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
  TotalSize            = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size    = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreMsr.Msr              = (SPIN_LOCK *)SemaphoreAddr;
  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
        ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}

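//
// Editorial sketch of the semaphore block carved out above. Each item is
// padded to SemaphoreSize (typically one cache line, per
// GetSpinLockProperties ()), so no two locks share a line:
//
//   [ Counter | InsideSmm | AllCpusInSync | PFLock | CodeAccessCheckLock | MemoryMappedLock ]
//   [ Busy x ProcessorCount | Run x ProcessorCount | Present x ProcessorCount ]
//   [ MSR spin locks ... to the end of the allocated pages ]
//
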
/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN                      CpuIndex;

  if (mSmmMpSyncData != NULL) {
    //
    // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
    // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
    //
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
      *(mSmmMpSyncData->CpuData[CpuIndex].Busy)    = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Run)     = 0;
      *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported!!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
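
  //
  // Editorial note: CPUID leaf 0x80000008 EAX[7:0] reports the physical
  // address width, so the mask above keeps bits [PhysAddrBits-1:12], clipped
  // to 48 bits, i.e. the page-frame portion of a physical address.
  //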

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Install SMI handler for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)(GdtTssTables + GdtTableStepSize * Index),
      gcSmiGdtr.Limit + 1,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
  MtrrGetAllMtrrs (&gSmiMtrrs);

  return Cr3;
}

/**
  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       The SMM Foundation entry point was successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}