UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
1/** @file\r
2SMM MP service implementation\r
3\r
4Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
17//\r
18 // Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)\r
19//\r
20MTRR_SETTINGS gSmiMtrrs;\r
21UINT64 gPhyMask;\r
22SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
23UINTN mSmmMpSyncDataSize;\r
24SMM_CPU_SEMAPHORES mSmmCpuSemaphores;\r
25UINTN mSemaphoreSize;\r
26SPIN_LOCK *mPFLock = NULL;\r
27SMM_CPU_SYNC_MODE mCpuSmmSyncMode;\r
28\r
29/**\r
30 Performs an atomic compare exchange operation to get semaphore.\r
31 The compare exchange operation must be performed using\r
32 MP safe mechanisms.\r
33\r
34 @param Sem IN: 32-bit unsigned integer\r
35 OUT: original integer - 1\r
36 @return Original integer - 1\r
37\r
38**/\r
39UINT32\r
40WaitForSemaphore (\r
41 IN OUT volatile UINT32 *Sem\r
42 )\r
43{\r
44 UINT32 Value;\r
45\r
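//\r
// Spin until the semaphore is non-zero and the atomic decrement succeeds; a failed\r
// compare-exchange means another CPU updated *Sem first, so re-read and retry.\r
//\r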
46 do {\r
47 Value = *Sem;\r
48 } while (Value == 0 ||\r
49 InterlockedCompareExchange32 (\r
50 (UINT32*)Sem,\r
51 Value,\r
52 Value - 1\r
53 ) != Value);\r
54 return Value - 1;\r
55}\r
56\r
57\r
58/**\r
59 Performs an atomic compare exchange operation to release semaphore.\r
60 The compare exchange operation must be performed using\r
61 MP safe mechanisms.\r
62\r
63 @param Sem IN: 32-bit unsigned integer\r
64 OUT: original integer + 1\r
65 @return Original integer + 1\r
66\r
67**/\r
68UINT32\r
69ReleaseSemaphore (\r
70 IN OUT volatile UINT32 *Sem\r
71 )\r
72{\r
73 UINT32 Value;\r
74\r
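//\r
// Atomically increment the semaphore, unless it has been locked down to (UINT32)-1\r
// by LockdownSemaphore(); in that case the value is left unchanged.\r
//\r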
75 do {\r
76 Value = *Sem;\r
77 } while (Value + 1 != 0 &&\r
78 InterlockedCompareExchange32 (\r
79 (UINT32*)Sem,\r
80 Value,\r
81 Value + 1\r
82 ) != Value);\r
83 return Value + 1;\r
84}\r
85\r
86/**\r
87 Performs an atomic compare exchange operation to lock semaphore.\r
88 The compare exchange operation must be performed using\r
89 MP safe mechanisms.\r
90\r
91 @param Sem IN: 32-bit unsigned integer\r
92 OUT: -1\r
93 @return Original integer\r
94\r
95**/\r
96UINT32\r
97LockdownSemaphore (\r
98 IN OUT volatile UINT32 *Sem\r
99 )\r
100{\r
101 UINT32 Value;\r
102\r
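//\r
// Atomically latch the semaphore at (UINT32)-1 so that later ReleaseSemaphore() calls\r
// become no-ops and no additional CPUs can check in for this SMI.\r
//\r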
103 do {\r
104 Value = *Sem;\r
105 } while (InterlockedCompareExchange32 (\r
106 (UINT32*)Sem,\r
107 Value, (UINT32)-1\r
108 ) != Value);\r
109 return Value;\r
110}\r
111\r
112/**\r
113 Wait for all APs to perform an atomic compare exchange operation that releases the semaphore.\r
114\r
115 @param NumberOfAPs Number of APs to wait for.\r
116\r
117**/\r
118VOID\r
119WaitForAllAPs (\r
120 IN UINTN NumberOfAPs\r
121 )\r
122{\r
123 UINTN BspIndex;\r
124\r
125 BspIndex = mSmmMpSyncData->BspIndex;\r
126 while (NumberOfAPs-- > 0) {\r
127 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
128 }\r
129}\r
130\r
131/**\r
132 Performs an atomic compare exchange operation to release semaphore\r
133 for each AP.\r
134\r
135**/\r
136VOID\r
137ReleaseAllAPs (\r
138 VOID\r
139 )\r
140{\r
141 UINTN Index;\r
142 UINTN BspIndex;\r
143\r
144 BspIndex = mSmmMpSyncData->BspIndex;\r
145 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
146 if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {\r
147 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);\r
148 }\r
149 }\r
150}\r
151\r
152/**\r
153 Checks if all CPUs (with certain exceptions) have checked in for this SMI run\r
154\r
155 @param Exceptions CPU Arrival exception flags.\r
156\r
157 @retval TRUE if all CPUs have checked in.\r
158 @retval FALSE if at least one Normal AP hasn't checked in.\r
159\r
160**/\r
161BOOLEAN\r
162AllCpusInSmmWithExceptions (\r
163 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions\r
164 )\r
165{\r
166 UINTN Index;\r
167 SMM_CPU_DATA_BLOCK *CpuData;\r
168 EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
169\r
170 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
171\r
172 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {\r
173 return TRUE;\r
174 }\r
175\r
176 CpuData = mSmmMpSyncData->CpuData;\r
177 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;\r
178 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
179 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
180 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {\r
181 continue;\r
182 }\r
183 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {\r
184 continue;\r
185 }\r
186 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {\r
187 continue;\r
188 }\r
189 return FALSE;\r
190 }\r
191 }\r
192\r
193\r
194 return TRUE;\r
195}\r
196\r
197\r
198/**\r
199 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code\r
200 before entering SMM, except SMI-disabled APs.\r
201\r
202**/\r
203VOID\r
204SmmWaitForApArrival (\r
205 VOID\r
206 )\r
207{\r
208 UINT64 Timer;\r
209 UINTN Index;\r
210\r
211 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
212\r
213 //\r
214 // Platform implementor should choose a timeout value appropriately:\r
215 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note\r
216 // the SMI Handlers must ALWAYS take into account the case that not all APs are available in an SMI run.\r
217 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI\r
218 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will\r
219 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the\r
220 // SMI IPI), because with a buffered SMI, the CPU will enter SMM immediately after it is brought out of the blocked state.\r
221 // - The timeout value must be longer than the longest possible IO operation in the system.\r
222 //\r
223\r
224 //\r
225 // Sync with APs 1st timeout\r
226 //\r
227 for (Timer = StartSyncTimer ();\r
228 !IsSyncTimerTimeout (Timer) &&\r
229 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
230 ) {\r
231 CpuPause ();\r
232 }\r
233\r
234 //\r
235 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,\r
236 // because:\r
237 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running\r
238 // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they\r
239 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode\r
240 // work while SMI handling is on-going.\r
241 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.\r
242 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state\r
243 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal\r
244 // mode work while SMI handling is on-going.\r
245 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:\r
246 // - In traditional flow, SMI disabling is discouraged.\r
247 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.\r
248 // In both cases, adding SMI-disabling checking code increases overhead.\r
249 //\r
250 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {\r
251 //\r
252 // Send SMI IPIs to bring outside processors in\r
253 //\r
254 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
255 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
256 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
257 }\r
258 }\r
259\r
260 //\r
261 // Sync with APs 2nd timeout.\r
262 //\r
263 for (Timer = StartSyncTimer ();\r
264 !IsSyncTimerTimeout (Timer) &&\r
265 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
266 ) {\r
267 CpuPause ();\r
268 }\r
269 }\r
270\r
271 return;\r
272}\r
273\r
274\r
275/**\r
276 Replace OS MTRRs with SMI MTRRs.\r
277\r
278 @param CpuIndex Processor Index\r
279\r
280**/\r
281VOID\r
282ReplaceOSMtrrs (\r
283 IN UINTN CpuIndex\r
284 )\r
285{\r
286 SmmCpuFeaturesDisableSmrr ();\r
287\r
288 //\r
289 // Replace all MTRR registers\r
290 //\r
291 MtrrSetAllMtrrs (&gSmiMtrrs);\r
292}\r
293\r
294/**\r
295 SMI handler for BSP.\r
296\r
297 @param CpuIndex BSP processor Index\r
298 @param SyncMode SMM MP sync mode\r
299\r
300**/\r
301VOID\r
302BSPHandler (\r
303 IN UINTN CpuIndex,\r
304 IN SMM_CPU_SYNC_MODE SyncMode\r
305 )\r
306{\r
307 UINTN Index;\r
308 MTRR_SETTINGS Mtrrs;\r
309 UINTN ApCount;\r
310 BOOLEAN ClearTopLevelSmiResult;\r
311 UINTN PresentCount;\r
312\r
313 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);\r
314 ApCount = 0;\r
315\r
316 //\r
317 // Flag BSP's presence\r
318 //\r
319 *mSmmMpSyncData->InsideSmm = TRUE;\r
320\r
321 //\r
322 // Initialize Debug Agent to start source level debug in BSP handler\r
323 //\r
324 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);\r
325\r
326 //\r
327 // Mark this processor's presence\r
328 //\r
329 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
330\r
331 //\r
332 // Clear platform top level SMI status bit before calling SMI handlers. If\r
333 // we cleared it after SMI handlers are run, we would miss the SMI that\r
334 // occurs after SMI handlers are done and before SMI status bit is cleared.\r
335 //\r
336 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();\r
337 ASSERT (ClearTopLevelSmiResult == TRUE);\r
338\r
339 //\r
340 // Set running processor index\r
341 //\r
342 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;\r
343\r
344 //\r
345 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.\r
346 //\r
347 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
348\r
349 //\r
350 // Wait for APs to arrive\r
351 //\r
352 SmmWaitForApArrival();\r
353\r
354 //\r
355 // Lock the counter down and retrieve the number of APs\r
356 //\r
357 *mSmmMpSyncData->AllCpusInSync = TRUE;\r
358 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
359\r
360 //\r
361 // Wait for all APs to get ready for programming MTRRs\r
362 //\r
363 WaitForAllAPs (ApCount);\r
364\r
365 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
366 //\r
367 // Signal all APs it's time for backup MTRRs\r
368 //\r
369 ReleaseAllAPs ();\r
370\r
371 //\r
372 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at\r
373 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set\r
374 // to a large enough value to avoid this situation.\r
375 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.\r
376 // We do the backup first and then set MTRRs to avoid a race condition for threads\r
377 // in the same core.\r
378 //\r
379 MtrrGetAllMtrrs(&Mtrrs);\r
380\r
381 //\r
382 // Wait for all APs to complete their MTRR saving\r
383 //\r
384 WaitForAllAPs (ApCount);\r
385\r
386 //\r
387 // Let all processors program SMM MTRRs together\r
388 //\r
389 ReleaseAllAPs ();\r
390\r
391 //\r
392 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at\r
393 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set\r
394 // to a large enough value to avoid this situation.\r
395 //\r
396 ReplaceOSMtrrs (CpuIndex);\r
397\r
398 //\r
399 // Wait for all APs to complete their MTRR programming\r
400 //\r
401 WaitForAllAPs (ApCount);\r
402 }\r
403 }\r
404\r
405 //\r
406 // The BUSY lock is initialized to Acquired state\r
407 //\r
408 AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
409\r
410 //\r
411 // Perform the pre tasks\r
412 //\r
413 PerformPreTasks ();\r
414\r
415 //\r
416 // Invoke SMM Foundation EntryPoint with the processor information context.\r
417 //\r
418 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);\r
419\r
420 //\r
421 // Make sure all APs have completed their pending non-blocking tasks\r
422 //\r
423 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
424 if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {\r
425 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
426 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
427 }\r
428 }\r
429\r
430 //\r
431 // Perform the remaining tasks\r
432 //\r
433 PerformRemainingTasks ();\r
434\r
435 //\r
436 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and\r
437 // make those APs exit SMI synchronously. APs which arrive later will be excluded and\r
438 // will run through freely.\r
439 //\r
440 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {\r
441\r
442 //\r
443 // Lock the counter down and retrieve the number of APs\r
444 //\r
445 *mSmmMpSyncData->AllCpusInSync = TRUE;\r
446 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
447 //\r
448 // Make sure all APs have their Present flag set\r
449 //\r
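//\r
// ApCount counted only the APs that checked in through the Counter semaphore; once the\r
// BSP's own Present flag is included, the total exceeds ApCount only when every counted\r
// AP has also set its Present flag.\r
//\r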
450 while (TRUE) {\r
451 PresentCount = 0;\r
452 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
453 if (*(mSmmMpSyncData->CpuData[Index].Present)) {\r
454 PresentCount ++;\r
455 }\r
456 }\r
457 if (PresentCount > ApCount) {\r
458 break;\r
459 }\r
460 }\r
461 }\r
462\r
463 //\r
464 // Notify all APs to exit\r
465 //\r
466 *mSmmMpSyncData->InsideSmm = FALSE;\r
467 ReleaseAllAPs ();\r
468\r
469 //\r
470 // Wait for all APs to complete their pending tasks\r
471 //\r
472 WaitForAllAPs (ApCount);\r
473\r
474 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
475 //\r
476 // Signal APs to restore MTRRs\r
477 //\r
478 ReleaseAllAPs ();\r
479\r
480 //\r
481 // Restore OS MTRRs\r
482 //\r
483 SmmCpuFeaturesReenableSmrr ();\r
484 MtrrSetAllMtrrs(&Mtrrs);\r
485\r
486 //\r
487 // Wait for all APs to complete MTRR programming\r
488 //\r
489 WaitForAllAPs (ApCount);\r
490 }\r
491\r
492 //\r
493 // Stop source level debug in BSP handler, the code below will not be\r
494 // debugged.\r
495 //\r
496 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);\r
497\r
498 //\r
499 // Signal APs to Reset states/semaphore for this processor\r
500 //\r
501 ReleaseAllAPs ();\r
502\r
503 //\r
504 // Perform pending operations for hot-plug\r
505 //\r
506 SmmCpuUpdate ();\r
507\r
508 //\r
509 // Clear the Present flag of BSP\r
510 //\r
511 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
512\r
513 //\r
514 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but\r
515 // WaitForAllAps does not depend on the Present flag.\r
516 //\r
517 WaitForAllAPs (ApCount);\r
518\r
519 //\r
520 // Reset BspIndex to -1, meaning BSP has not been elected.\r
521 //\r
522 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
523 mSmmMpSyncData->BspIndex = (UINT32)-1;\r
524 }\r
525\r
526 //\r
527 // Allow APs to check in from this point on\r
528 //\r
529 *mSmmMpSyncData->Counter = 0;\r
530 *mSmmMpSyncData->AllCpusInSync = FALSE;\r
531}\r
532\r
533/**\r
534 SMI handler for AP.\r
535\r
536 @param CpuIndex AP processor Index.\r
537 @param ValidSmi Indicates whether the current SMI is a valid SMI.\r
538 @param SyncMode SMM MP sync mode.\r
539\r
540**/\r
541VOID\r
542APHandler (\r
543 IN UINTN CpuIndex,\r
544 IN BOOLEAN ValidSmi,\r
545 IN SMM_CPU_SYNC_MODE SyncMode\r
546 )\r
547{\r
548 UINT64 Timer;\r
549 UINTN BspIndex;\r
550 MTRR_SETTINGS Mtrrs;\r
551\r
552 //\r
553 // Wait for the BSP to enter SMM, with timeout\r
554 //\r
555 for (Timer = StartSyncTimer ();\r
556 !IsSyncTimerTimeout (Timer) &&\r
557 !(*mSmmMpSyncData->InsideSmm);\r
558 ) {\r
559 CpuPause ();\r
560 }\r
561\r
562 if (!(*mSmmMpSyncData->InsideSmm)) {\r
563 //\r
564 // BSP timeout in the first round\r
565 //\r
566 if (mSmmMpSyncData->BspIndex != -1) {\r
567 //\r
568 // BSP Index is known\r
569 //\r
570 BspIndex = mSmmMpSyncData->BspIndex;\r
571 ASSERT (CpuIndex != BspIndex);\r
572\r
573 //\r
574 // Send SMI IPI to bring BSP in\r
575 //\r
576 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);\r
577\r
578 //\r
579 // Now wait for the BSP a 2nd time, with timeout\r
580 //\r
581 for (Timer = StartSyncTimer ();\r
582 !IsSyncTimerTimeout (Timer) &&\r
583 !(*mSmmMpSyncData->InsideSmm);\r
584 ) {\r
585 CpuPause ();\r
586 }\r
587\r
588 if (!(*mSmmMpSyncData->InsideSmm)) {\r
589 //\r
590 // Give up since BSP is unable to enter SMM\r
591 // and signal the completion of this AP\r
592 WaitForSemaphore (mSmmMpSyncData->Counter);\r
593 return;\r
594 }\r
595 } else {\r
596 //\r
597 // Don't know BSP index. Give up without sending IPI to BSP.\r
598 //\r
599 WaitForSemaphore (mSmmMpSyncData->Counter);\r
600 return;\r
601 }\r
602 }\r
603\r
604 //\r
605 // BSP is available\r
606 //\r
607 BspIndex = mSmmMpSyncData->BspIndex;\r
608 ASSERT (CpuIndex != BspIndex);\r
609\r
610 //\r
611 // Mark this processor's presence\r
612 //\r
613 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
614\r
615 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
616 //\r
617 // Notify BSP of arrival at this point\r
618 //\r
619 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
620 }\r
621\r
622 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
623 //\r
624 // Wait for the signal from BSP to backup MTRRs\r
625 //\r
626 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
627\r
628 //\r
629 // Backup OS MTRRs\r
630 //\r
631 MtrrGetAllMtrrs(&Mtrrs);\r
632\r
633 //\r
634 // Signal BSP the completion of this AP\r
635 //\r
636 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
637\r
638 //\r
639 // Wait for BSP's signal to program MTRRs\r
640 //\r
641 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
642\r
643 //\r
644 // Replace OS MTRRs with SMI MTRRs\r
645 //\r
646 ReplaceOSMtrrs (CpuIndex);\r
647\r
648 //\r
649 // Signal BSP the completion of this AP\r
650 //\r
651 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
652 }\r
653\r
654 while (TRUE) {\r
655 //\r
656 // Wait for something to happen\r
657 //\r
658 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
659\r
660 //\r
661 // Check if BSP wants to exit SMM\r
662 //\r
663 if (!(*mSmmMpSyncData->InsideSmm)) {\r
664 break;\r
665 }\r
666\r
667 //\r
668 // BUSY should be acquired by SmmStartupThisAp()\r
669 //\r
670 ASSERT (\r
671 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)\r
672 );\r
673\r
674 //\r
675 // Invoke the scheduled procedure\r
676 //\r
677 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (\r
678 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter\r
679 );\r
680\r
681 //\r
682 // Release BUSY\r
683 //\r
684 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
685 }\r
686\r
687 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
688 //\r
689 // Notify BSP the readiness of this AP to program MTRRs\r
690 //\r
691 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
692\r
693 //\r
694 // Wait for the signal from BSP to program MTRRs\r
695 //\r
696 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
697\r
698 //\r
699 // Restore OS MTRRs\r
700 //\r
701 SmmCpuFeaturesReenableSmrr ();\r
702 MtrrSetAllMtrrs(&Mtrrs);\r
703 }\r
704\r
705 //\r
706 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor\r
707 //\r
708 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
709\r
710 //\r
711 // Wait for the signal from BSP to Reset states/semaphore for this processor\r
712 //\r
713 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
714\r
715 //\r
716 // Reset states/semaphore for this processor\r
717 //\r
718 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
719\r
720 //\r
721 // Notify BSP the readiness of this AP to exit SMM\r
722 //\r
723 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
724\r
725}\r
726\r
727/**\r
728 Create 4G PageTable in SMRAM.\r
729\r
730 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE\r
731 @return PageTable Address\r
732\r
733**/\r
734UINT32\r
735Gen4GPageTable (\r
736 IN BOOLEAN Is32BitPageTable\r
737 )\r
738{\r
739 VOID *PageTable;\r
740 UINTN Index;\r
741 UINT64 *Pte;\r
742 UINTN PagesNeeded;\r
743 UINTN Low2MBoundary;\r
744 UINTN High2MBoundary;\r
745 UINTN Pages;\r
746 UINTN GuardPage;\r
747 UINT64 *Pdpte;\r
748 UINTN PageIndex;\r
749 UINTN PageAddress;\r
750\r
751 Low2MBoundary = 0;\r
752 High2MBoundary = 0;\r
753 PagesNeeded = 0;\r
754 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
755 //\r
756 // Add one more page for known good stack, then find the lower 2MB aligned address.\r
757 //\r
758 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);\r
759 //\r
760 // Add two more pages for known good stack and stack guard page,\r
761 // then find the lower 2MB aligned address.\r
762 //\r
763 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);\r
764 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;\r
765 }\r
766 //\r
767 // Allocate the page table\r
768 //\r
769 PageTable = AllocatePageTableMemory (5 + PagesNeeded);\r
770 ASSERT (PageTable != NULL);\r
771\r
772 PageTable = (VOID *)((UINTN)PageTable);\r
773 Pte = (UINT64*)PageTable;\r
774\r
775 //\r
776 // Zero out all page table entries first\r
777 //\r
778 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));\r
779\r
780 //\r
781 // Set Page Directory Pointers\r
782 //\r
783 for (Index = 0; Index < 4; Index++) {\r
784 Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
785 }\r
786 Pte += EFI_PAGE_SIZE / sizeof (*Pte);\r
787\r
788 //\r
789 // Fill in Page Directory Entries\r
790 //\r
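// Each directory entry maps a 2MB page (Index << 21); the four page directories\r
// together identity-map the full 0-4GB range.\r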
791 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {\r
792 Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
793 }\r
794\r
795 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
796 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);\r
797 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;\r
798 Pdpte = (UINT64*)PageTable;\r
799 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {\r
800 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));\r
801 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;\r
802 //\r
803 // Fill in Page Table Entries\r
804 //\r
805 Pte = (UINT64*)Pages;\r
806 PageAddress = PageIndex;\r
807 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
808 if (PageAddress == GuardPage) {\r
809 //\r
810 // Mark the guard page as non-present\r
811 //\r
812 Pte[Index] = PageAddress;\r
813 GuardPage += mSmmStackSize;\r
814 if (GuardPage > mSmmStackArrayEnd) {\r
815 GuardPage = 0;\r
816 }\r
817 } else {\r
818 Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;\r
819 }\r
820 PageAddress+= EFI_PAGE_SIZE;\r
821 }\r
822 Pages += EFI_PAGE_SIZE;\r
823 }\r
824 }\r
825\r
826 return (UINT32)(UINTN)PageTable;\r
827}\r
828\r
829/**\r
830 Set memory cacheability.\r
831\r
832 @param PageTable PageTable Address\r
833 @param Address Memory Address to change cacheability for\r
834 @param Cacheability Cacheability value to set\r
835\r
836**/\r
837VOID\r
838SetCacheability (\r
839 IN UINT64 *PageTable,\r
840 IN UINTN Address,\r
841 IN UINT8 Cacheability\r
842 )\r
843{\r
844 UINTN PTIndex;\r
845 VOID *NewPageTableAddress;\r
846 UINT64 *NewPageTable;\r
847 UINTN Index;\r
848\r
849 ASSERT ((Address & EFI_PAGE_MASK) == 0);\r
850\r
851 if (sizeof (UINTN) == sizeof (UINT64)) {\r
852 PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;\r
853 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
854 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
855 }\r
856\r
857 PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;\r
858 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
859 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
860\r
861 //\r
862 // A perfect implementation should compare the original cacheability with the\r
863 // one being set, and break a 2M page entry into pieces only when they\r
864 // disagree.\r
865 //\r
866 PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;\r
867 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {\r
868 //\r
869 // Allocate a page from SMRAM\r
870 //\r
871 NewPageTableAddress = AllocatePageTableMemory (1);\r
872 ASSERT (NewPageTableAddress != NULL);\r
873\r
874 NewPageTable = (UINT64 *)NewPageTableAddress;\r
875\r
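//\r
// Split the 2MB entry into 512 4KB entries that inherit its attributes, moving the\r
// PAT bit from its 2MB-entry position to its 4KB-entry position and adding the\r
// per-page address offset.\r
//\r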
876 for (Index = 0; Index < 0x200; Index++) {\r
877 NewPageTable[Index] = PageTable[PTIndex];\r
878 if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {\r
879 NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);\r
880 NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;\r
881 }\r
882 NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);\r
883 }\r
884\r
885 PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;\r
886 }\r
887\r
888 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
889 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
890\r
891 PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;\r
892 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
893 PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));\r
894 PageTable[PTIndex] |= (UINT64)Cacheability;\r
895}\r
896\r
897/**\r
898 Schedule a procedure to run on the specified CPU.\r
899\r
900 @param[in] Procedure The address of the procedure to run\r
901 @param[in] CpuIndex Target CPU Index\r
902 @param[in, out] ProcArguments The parameter to pass to the procedure\r
903 @param[in] BlockingMode Startup AP in blocking mode or not\r
904\r
905 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
906 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP\r
907 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
908 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
909 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
910\r
911**/\r
912EFI_STATUS\r
913InternalSmmStartupThisAp (\r
914 IN EFI_AP_PROCEDURE Procedure,\r
915 IN UINTN CpuIndex,\r
916 IN OUT VOID *ProcArguments OPTIONAL,\r
917 IN BOOLEAN BlockingMode\r
918 )\r
919{\r
920 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {\r
921 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
922 return EFI_INVALID_PARAMETER;\r
923 }\r
924 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
925 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));\r
926 return EFI_INVALID_PARAMETER;\r
927 }\r
928 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
929 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {\r
930 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));\r
931 }\r
932 return EFI_INVALID_PARAMETER;\r
933 }\r
934 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {\r
935 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
936 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));\r
937 }\r
938 return EFI_INVALID_PARAMETER;\r
939 }\r
940\r
941 if (BlockingMode) {\r
942 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
943 } else {\r
944 if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {\r
945 DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));\r
946 return EFI_INVALID_PARAMETER;\r
947 }\r
948 }\r
949\r
950 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;\r
951 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;\r
952 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
953\r
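//\r
// In blocking mode, wait until the AP finishes the procedure and releases its Busy\r
// lock (acquired above); acquiring and immediately releasing it here blocks the\r
// caller until that happens.\r
//\r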
954 if (BlockingMode) {\r
955 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
956 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
957 }\r
958 return EFI_SUCCESS;\r
959}\r
960\r
961/**\r
962 Schedule a procedure to run on the specified CPU in blocking mode.\r
963\r
964 @param[in] Procedure The address of the procedure to run\r
965 @param[in] CpuIndex Target CPU Index\r
966 @param[in, out] ProcArguments The parameter to pass to the procedure\r
967\r
968 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
969 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP\r
970 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
971 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
972 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
973\r
974**/\r
975EFI_STATUS\r
976EFIAPI\r
977SmmBlockingStartupThisAp (\r
978 IN EFI_AP_PROCEDURE Procedure,\r
979 IN UINTN CpuIndex,\r
980 IN OUT VOID *ProcArguments OPTIONAL\r
981 )\r
982{\r
983 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);\r
984}\r
985\r
986/**\r
987 Schedule a procedure to run on the specified CPU.\r
988\r
989 @param Procedure The address of the procedure to run\r
990 @param CpuIndex Target CPU Index\r
991 @param ProcArguments The parameter to pass to the procedure\r
992\r
993 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
994 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP\r
995 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
996 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
997 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
998\r
999**/\r
1000EFI_STATUS\r
1001EFIAPI\r
1002SmmStartupThisAp (\r
1003 IN EFI_AP_PROCEDURE Procedure,\r
1004 IN UINTN CpuIndex,\r
1005 IN OUT VOID *ProcArguments OPTIONAL\r
1006 )\r
1007{\r
1008 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));\r
1009}\r
1010\r
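//\r
// Illustrative sketch (not part of this driver): a hypothetical SMI handler could\r
// schedule work on AP 1 roughly as follows, where MyApProcedure and MyContext are\r
// placeholder names.\r
//\r
//   VOID\r
//   EFIAPI\r
//   MyApProcedure (\r
//     IN OUT VOID  *Buffer\r
//     )\r
//   {\r
//     // Per-AP work using Buffer goes here.\r
//   }\r
//\r
//   Status = SmmStartupThisAp (MyApProcedure, 1, &MyContext);\r
//\r
\r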
1011/**\r
1012 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.\r
1013 They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.\r
1014\r
1015 NOTE: It might not be appropriate at runtime since it might\r
1016 conflict with OS debugging facilities. Turn them off in RELEASE builds.\r
1017\r
1018 @param CpuIndex CPU Index\r
1019\r
1020**/\r
1021VOID\r
1022EFIAPI\r
1023CpuSmmDebugEntry (\r
1024 IN UINTN CpuIndex\r
1025 )\r
1026{\r
1027 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
1028 \r
1029 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
1030 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
1031 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
1032 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1033 AsmWriteDr6 (CpuSaveState->x86._DR6);\r
1034 AsmWriteDr7 (CpuSaveState->x86._DR7);\r
1035 } else {\r
1036 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);\r
1037 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);\r
1038 }\r
1039 }\r
1040}\r
1041\r
1042/**\r
1043 This function restores DR6 & DR7 to SMM save state.\r
1044\r
1045 NOTE: It might not be appropriate at runtime since it might\r
1046 conflict with OS debugging facilities. Turn them off in RELEASE builds.\r
1047\r
1048 @param CpuIndex CPU Index\r
1049\r
1050**/\r
1051VOID\r
1052EFIAPI\r
1053CpuSmmDebugExit (\r
1054 IN UINTN CpuIndex\r
1055 )\r
1056{\r
1057 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
1058\r
1059 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
1060 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
1061 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
1062 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1063 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
1064 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();\r
1065 } else {\r
1066 CpuSaveState->x64._DR7 = AsmReadDr7 ();\r
1067 CpuSaveState->x64._DR6 = AsmReadDr6 ();\r
1068 }\r
1069 }\r
1070}\r
1071\r
1072/**\r
1073 C function for SMI entry, each processor comes here upon SMI trigger.\r
1074\r
1075 @param CpuIndex CPU Index\r
1076\r
1077**/\r
1078VOID\r
1079EFIAPI\r
1080SmiRendezvous (\r
1081 IN UINTN CpuIndex\r
1082 )\r
1083{\r
1084 EFI_STATUS Status;\r
1085 BOOLEAN ValidSmi;\r
1086 BOOLEAN IsBsp;\r
1087 BOOLEAN BspInProgress;\r
1088 UINTN Index;\r
1089 UINTN Cr2;\r
1090\r
1091 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
1092\r
1093 //\r
1094 // Save Cr2 because Page Fault exception in SMM may override its value\r
1095 //\r
1096 Cr2 = AsmReadCr2 ();\r
1097\r
1098 //\r
1099 // Perform CPU specific entry hooks\r
1100 //\r
1101 SmmCpuFeaturesRendezvousEntry (CpuIndex);\r
1102\r
1103 //\r
1104 // Determine if this is a valid SMI\r
1105 //\r
1106 ValidSmi = PlatformValidSmi();\r
1107\r
1108 //\r
1109 // Determine if the BSP is already in progress. Note this must be checked after\r
1110 // ValidSmi because BSP may clear a valid SMI source after checking in.\r
1111 //\r
1112 BspInProgress = *mSmmMpSyncData->InsideSmm;\r
1113\r
1114 if (!BspInProgress && !ValidSmi) {\r
1115 //\r
1116 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not\r
1117 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI\r
1118 // status had been cleared by BSP and an existing SMI run has almost ended. (Note\r
1119 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there\r
1120 // is nothing we need to do.\r
1121 //\r
1122 goto Exit;\r
1123 } else {\r
1124 //\r
1125 // Signal presence of this processor\r
1126 //\r
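//\r
// ReleaseSemaphore() returns 0 only when the BSP has already locked the Counter down\r
// to (UINT32)-1, i.e. the arrival window for this SMI has closed.\r
//\r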
1127 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {\r
1128 //\r
1129 // BSP has already ended the synchronization, so QUIT!!!\r
1130 //\r
1131\r
1132 //\r
1133 // Wait for BSP's signal to finish SMI\r
1134 //\r
1135 while (*mSmmMpSyncData->AllCpusInSync) {\r
1136 CpuPause ();\r
1137 }\r
1138 goto Exit;\r
1139 } else {\r
1140\r
1141 //\r
1142 // The BUSY lock is initialized to Released state.\r
1143 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.\r
1144 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately\r
1145 // after AP's present flag is detected.\r
1146 //\r
1147 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
1148 }\r
1149\r
1150 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1151 ActivateSmmProfile (CpuIndex);\r
1152 }\r
1153\r
1154 if (BspInProgress) {\r
1155 //\r
1156 // BSP has been elected. Follow AP path, regardless of ValidSmi flag\r
1157 // as BSP may have cleared the SMI status\r
1158 //\r
1159 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1160 } else {\r
1161 //\r
1162 // We have a valid SMI\r
1163 //\r
1164\r
1165 //\r
1166 // Elect BSP\r
1167 //\r
1168 IsBsp = FALSE;\r
1169 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1170 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {\r
1171 //\r
1172 // Call platform hook to do BSP election\r
1173 //\r
1174 Status = PlatformSmmBspElection (&IsBsp);\r
1175 if (EFI_SUCCESS == Status) {\r
1176 //\r
1177 // Platform hook determined the BSP successfully\r
1178 //\r
1179 if (IsBsp) {\r
1180 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;\r
1181 }\r
1182 } else {\r
1183 //\r
1184 // Platform hook failed to determine the BSP; use the default BSP election method\r
1185 //\r
1186 InterlockedCompareExchange32 (\r
1187 (UINT32*)&mSmmMpSyncData->BspIndex,\r
1188 (UINT32)-1,\r
1189 (UINT32)CpuIndex\r
1190 );\r
1191 }\r
1192 }\r
1193 }\r
1194\r
1195 //\r
1196 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP\r
1197 //\r
1198 if (mSmmMpSyncData->BspIndex == CpuIndex) {\r
1199\r
1200 //\r
1201 // Clear last request for SwitchBsp.\r
1202 //\r
1203 if (mSmmMpSyncData->SwitchBsp) {\r
1204 mSmmMpSyncData->SwitchBsp = FALSE;\r
1205 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1206 mSmmMpSyncData->CandidateBsp[Index] = FALSE;\r
1207 }\r
1208 }\r
1209\r
1210 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1211 SmmProfileRecordSmiNum ();\r
1212 }\r
1213\r
1214 //\r
1215 // BSP Handler is always called with a ValidSmi == TRUE\r
1216 //\r
1217 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);\r
1218 } else {\r
1219 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1220 }\r
1221 }\r
1222\r
1223 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);\r
1224\r
1225 //\r
1226 // Wait for BSP's signal to exit SMI\r
1227 //\r
1228 while (*mSmmMpSyncData->AllCpusInSync) {\r
1229 CpuPause ();\r
1230 }\r
1231 }\r
1232\r
1233Exit:\r
1234 SmmCpuFeaturesRendezvousExit (CpuIndex);\r
1235 //\r
1236 // Restore Cr2\r
1237 //\r
1238 AsmWriteCr2 (Cr2);\r
1239}\r
1240\r
1241/**\r
1242 Allocate buffer for all semaphores and spin locks.\r
1243\r
1244**/\r
1245VOID\r
1246InitializeSmmCpuSemaphores (\r
1247 VOID\r
1248 )\r
1249{\r
1250 UINTN ProcessorCount;\r
1251 UINTN TotalSize;\r
1252 UINTN GlobalSemaphoresSize;\r
1253 UINTN CpuSemaphoresSize;\r
1254 UINTN MsrSemahporeSize;\r
1255 UINTN SemaphoreSize;\r
1256 UINTN Pages;\r
1257 UINTN *SemaphoreBlock;\r
1258 UINTN SemaphoreAddr;\r
1259\r
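//\r
// Each semaphore or lock is placed in its own SemaphoreSize-byte slot (the size\r
// reported by GetSpinLockProperties(), typically one cache line) to avoid false\r
// sharing between CPUs.\r
//\r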
1260 SemaphoreSize = GetSpinLockProperties ();\r
1261 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1262 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
1263 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;\r
1264 MsrSemahporeSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;\r
1265 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemahporeSize;\r
1266 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
1267 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
1268 Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
1269 SemaphoreBlock = AllocatePages (Pages);\r
1270 ASSERT (SemaphoreBlock != NULL);\r
1271 ZeroMem (SemaphoreBlock, TotalSize);\r
1272\r
1273 SemaphoreAddr = (UINTN)SemaphoreBlock;\r
1274 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;\r
1275 SemaphoreAddr += SemaphoreSize;\r
1276 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;\r
1277 SemaphoreAddr += SemaphoreSize;\r
1278 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;\r
1279 SemaphoreAddr += SemaphoreSize;\r
1280 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;\r
1281 SemaphoreAddr += SemaphoreSize;\r
1282 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
1283 = (SPIN_LOCK *)SemaphoreAddr;\r
1284 SemaphoreAddr += SemaphoreSize;\r
1285 mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock\r
1286 = (SPIN_LOCK *)SemaphoreAddr;\r
1287\r
1288 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
1289 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
1290 SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
1291 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;\r
1292 SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
1293 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;\r
1294\r
1295 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;\r
1296 mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;\r
1297 mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =\r
1298 ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;\r
1299 ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);\r
1300\r
1301 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
1302 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
1303 mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;\r
1304\r
1305 mSemaphoreSize = SemaphoreSize;\r
1306}\r
1307\r
1308/**\r
1309 Initialize un-cacheable data.\r
1310\r
1311**/\r
1312VOID\r
1313EFIAPI\r
1314InitializeMpSyncData (\r
1315 VOID\r
1316 )\r
1317{\r
1318 UINTN CpuIndex;\r
1319\r
1320 if (mSmmMpSyncData != NULL) {\r
1321 //\r
1322 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one\r
1323 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.\r
1324 //\r
1325 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);\r
1326 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
1327 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
1328 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1329 //\r
1330 // Enable BSP election by setting BspIndex to -1\r
1331 //\r
1332 mSmmMpSyncData->BspIndex = (UINT32)-1;\r
1333 }\r
1334 mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;\r
1335\r
1336 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;\r
1337 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;\r
1338 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;\r
1339 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&\r
1340 mSmmMpSyncData->AllCpusInSync != NULL);\r
1341 *mSmmMpSyncData->Counter = 0;\r
1342 *mSmmMpSyncData->InsideSmm = FALSE;\r
1343 *mSmmMpSyncData->AllCpusInSync = FALSE;\r
1344\r
1345 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {\r
1346 mSmmMpSyncData->CpuData[CpuIndex].Busy =\r
1347 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);\r
1348 mSmmMpSyncData->CpuData[CpuIndex].Run =\r
1349 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);\r
1350 mSmmMpSyncData->CpuData[CpuIndex].Present =\r
1351 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);\r
1352 *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;\r
1353 *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;\r
1354 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
1355 }\r
1356 }\r
1357}\r
1358\r
1359/**\r
1360 Initialize global data for MP synchronization.\r
1361\r
1362 @param Stacks Base address of SMI stack buffer for all processors.\r
1363 @param StackSize Stack size for each processor in SMM.\r
1364\r
1365**/\r
1366UINT32\r
1367InitializeMpServiceData (\r
1368 IN VOID *Stacks,\r
1369 IN UINTN StackSize\r
1370 )\r
1371{\r
1372 UINT32 Cr3;\r
1373 UINTN Index;\r
1374 UINT8 *GdtTssTables;\r
1375 UINTN GdtTableStepSize;\r
1376\r
1377 //\r
1378 // Allocate memory for all locks and semaphores\r
1379 //\r
1380 InitializeSmmCpuSemaphores ();\r
1381\r
1382 //\r
1383 // Initialize mSmmMpSyncData\r
1384 //\r
1385 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
1386 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1387 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
1388 ASSERT (mSmmMpSyncData != NULL);\r
1389 mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);\r
1390 InitializeMpSyncData ();\r
1391\r
1392 //\r
1393 // Initialize physical address mask\r
1394 // NOTE: Physical memory above virtual address limit is not supported !!!\r
1395 //\r
1396 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);\r
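//\r
// The CPUID leaf 0x80000008 above reports the number of physical address bits in\r
// EAX[7:0]; keep only bits [47:12] of the resulting mask so it can be applied\r
// directly to page table entries.\r
//\r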
1397 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;\r
1398 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;\r
1399\r
1400 //\r
1401 // Create page tables\r
1402 //\r
1403 Cr3 = SmmInitPageTable ();\r
1404\r
1405 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
1406\r
1407 //\r
1408 // Install SMI handler for each CPU\r
1409 //\r
1410 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1411 InstallSmiHandler (\r
1412 Index,\r
1413 (UINT32)mCpuHotPlugData.SmBase[Index],\r
1414 (VOID*)((UINTN)Stacks + (StackSize * Index)),\r
1415 StackSize,\r
1416 (UINTN)(GdtTssTables + GdtTableStepSize * Index),\r
1417 gcSmiGdtr.Limit + 1,\r
1418 gcSmiIdtr.Base,\r
1419 gcSmiIdtr.Limit + 1,\r
1420 Cr3\r
1421 );\r
1422 }\r
1423\r
1424 //\r
1425 // Record current MTRR settings\r
1426 //\r
1427 ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));\r
1428 MtrrGetAllMtrrs (&gSmiMtrrs);\r
1429\r
1430 return Cr3;\r
1431}\r
1432\r
1433/**\r
1434\r
1435 Register the SMM Foundation entry point.\r
1436\r
1437 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance\r
1438 @param SmmEntryPoint SMM Foundation EntryPoint\r
1439\r
1440 @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully\r
1441\r
1442**/\r
1443EFI_STATUS\r
1444EFIAPI\r
1445RegisterSmmEntry (\r
1446 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,\r
1447 IN EFI_SMM_ENTRY_POINT SmmEntryPoint\r
1448 )\r
1449{\r
1450 //\r
1451 // Record the SMM Foundation EntryPoint; it will be invoked later from the SMI entry vector.\r
1452 //\r
1453 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;\r
1454 return EFI_SUCCESS;\r
1455}\r