1/** @file\r
2SMM MP service implementation\r
3\r
4Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
17//\r
18// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)\r
19//\r
20UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];\r
21UINT64 gPhyMask;\r
22SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
23UINTN mSmmMpSyncDataSize;\r
24SMM_CPU_SEMAPHORES mSmmCpuSemaphores;\r
25UINTN mSemaphoreSize;\r
26SPIN_LOCK *mPFLock = NULL;\r
27\r
28/**\r
29 Performs an atomic compare exchange operation to get semaphore.\r
30 The compare exchange operation must be performed using\r
31 MP safe mechanisms.\r
32\r
33 @param Sem IN: 32-bit unsigned integer\r
34 OUT: original integer - 1\r
35 @return Original integer - 1\r
36\r
37**/\r
38UINT32\r
39WaitForSemaphore (\r
40 IN OUT volatile UINT32 *Sem\r
41 )\r
42{\r
43 UINT32 Value;\r
44\r
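  //\r
  // Spin while the count is zero; decrement only through a successful\r
  // compare exchange so concurrent CPUs cannot drive the count below zero.\r
  //\r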
45 do {\r
46 Value = *Sem;\r
47 } while (Value == 0 ||\r
48 InterlockedCompareExchange32 (\r
49 (UINT32*)Sem,\r
50 Value,\r
51 Value - 1\r
52 ) != Value);\r
53 return Value - 1;\r
54}\r
55\r
56\r
57/**\r
58 Performs an atomic compare exchange operation to release semaphore.\r
59 The compare exchange operation must be performed using\r
60 MP safe mechanisms.\r
61\r
62 @param Sem IN: 32-bit unsigned integer\r
63 OUT: original integer + 1\r
64 @return Original integer + 1\r
65\r
66**/\r
67UINT32\r
68ReleaseSemaphore (\r
69 IN OUT volatile UINT32 *Sem\r
70 )\r
71{\r
72 UINT32 Value;\r
73\r
74 do {\r
75 Value = *Sem;\r
76 } while (Value + 1 != 0 &&\r
77 InterlockedCompareExchange32 (\r
78 (UINT32*)Sem,\r
79 Value,\r
80 Value + 1\r
81 ) != Value);\r
82 return Value + 1;\r
83}\r
84\r
85/**\r
86 Performs an atomic compare exchange operation to lock semaphore.\r
87 The compare exchange operation must be performed using\r
88 MP safe mechanisms.\r
89\r
90 @param Sem IN: 32-bit unsigned integer\r
91 OUT: -1\r
92 @return Original integer\r
93\r
94**/\r
95UINT32\r
96LockdownSemaphore (\r
97 IN OUT volatile UINT32 *Sem\r
98 )\r
99{\r
100 UINT32 Value;\r
101\r
102 do {\r
103 Value = *Sem;\r
104 } while (InterlockedCompareExchange32 (\r
105 (UINT32*)Sem,\r
106 Value, (UINT32)-1\r
107 ) != Value);\r
108 return Value;\r
109}\r
110\r
111/**\r
112 Wait for all APs to perform an atomic compare exchange operation to release the semaphore.\r
113\r
114 @param NumberOfAPs Number of APs to wait for.\r
115\r
116**/\r
117VOID\r
118WaitForAllAPs (\r
119 IN UINTN NumberOfAPs\r
120 )\r
121{\r
122 UINTN BspIndex;\r
123\r
124 BspIndex = mSmmMpSyncData->BspIndex;\r
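  //\r
  // Each AP signals completion by releasing the BSP's Run semaphore; consume\r
  // one release for every AP that is expected to report in.\r
  //\r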
125 while (NumberOfAPs-- > 0) {\r
126 WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
127 }\r
128}\r
129\r
130/**\r
131 Performs an atomic compare exchange operation to release semaphore\r
132 for each AP.\r
133\r
134**/\r
135VOID\r
136ReleaseAllAPs (\r
137 VOID\r
138 )\r
139{\r
140 UINTN Index;\r
141 UINTN BspIndex;\r
142\r
143 BspIndex = mSmmMpSyncData->BspIndex;\r
144 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
145 if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {\r
146 ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);\r
147 }\r
148 }\r
149}\r
150\r
151/**\r
152 Checks if all CPUs (with certain exceptions) have checked in for this SMI run\r
153\r
154 @param Exceptions CPU Arrival exception flags.\r
155\r
156 @retval TRUE if all CPUs have checked in.\r
157 @retval FALSE if at least one Normal AP hasn't checked in.\r
158\r
159**/\r
160BOOLEAN\r
161AllCpusInSmmWithExceptions (\r
162 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions\r
163 )\r
164{\r
165 UINTN Index;\r
166 SMM_CPU_DATA_BLOCK *CpuData;\r
167 EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
168\r
169 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
170\r
171 if (*mSmmMpSyncData->Counter == mNumberOfCpus) {\r
172 return TRUE;\r
173 }\r
174\r
175 CpuData = mSmmMpSyncData->CpuData;\r
176 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;\r
177 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
178 if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
179 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {\r
180 continue;\r
181 }\r
182 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {\r
183 continue;\r
184 }\r
185 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {\r
186 continue;\r
187 }\r
188 return FALSE;\r
189 }\r
190 }\r
191\r
192\r
193 return TRUE;\r
194}\r
195\r
196\r
197/**\r
198 Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before\r
199 entering SMM, except SMI-disabled APs.\r
200\r
201**/\r
202VOID\r
203SmmWaitForApArrival (\r
204 VOID\r
205 )\r
206{\r
207 UINT64 Timer;\r
208 UINTN Index;\r
209\r
210 ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);\r
211\r
212 //\r
213 // Platform implementor should choose a timeout value appropriately:\r
214 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note\r
215 // the SMI Handlers must ALWAYS take into account the case where not all APs are available in an SMI run.\r
216 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI\r
217 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will\r
218 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the\r
219 // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.\r
220 // - The timeout value must be longer than the longest possible IO operation in the system\r
221 //\r
222\r
223 //\r
224 // Sync with APs 1st timeout\r
225 //\r
226 for (Timer = StartSyncTimer ();\r
227 !IsSyncTimerTimeout (Timer) &&\r
228 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
229 ) {\r
230 CpuPause ();\r
231 }\r
232\r
233 //\r
234 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,\r
235 // because:\r
236 // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running\r
237 // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they\r
238 // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode\r
239 // work while SMI handling is on-going.\r
240 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.\r
241 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state\r
242 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal\r
243 // mode work while SMI handling is on-going.\r
244 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:\r
245 // - In traditional flow, SMI disabling is discouraged.\r
246 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.\r
247 // In both cases, adding SMI-disabling checking code increases overhead.\r
248 //\r
249 if (*mSmmMpSyncData->Counter < mNumberOfCpus) {\r
250 //\r
251 // Send SMI IPIs to bring outside processors in\r
252 //\r
253 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
254 if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
255 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
256 }\r
257 }\r
258\r
259 //\r
260 // Sync with APs 2nd timeout.\r
261 //\r
262 for (Timer = StartSyncTimer ();\r
263 !IsSyncTimerTimeout (Timer) &&\r
264 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
265 ) {\r
266 CpuPause ();\r
267 }\r
268 }\r
269\r
270 return;\r
271}\r
272\r
273\r
274/**\r
275 Replace OS MTRRs with SMI MTRRs.\r
276\r
277 @param CpuIndex Processor Index\r
278\r
279**/\r
280VOID\r
281ReplaceOSMtrrs (\r
282 IN UINTN CpuIndex\r
283 )\r
284{\r
285 PROCESSOR_SMM_DESCRIPTOR *Psd;\r
286 UINT64 *SmiMtrrs;\r
287 MTRR_SETTINGS *BiosMtrr;\r
288\r
289 Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);\r
290 SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;\r
291\r
292 SmmCpuFeaturesDisableSmrr ();\r
293\r
294 //\r
295 // Replace all MTRR registers\r
296 //\r
297 BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;\r
298 MtrrSetAllMtrrs(BiosMtrr);\r
299}\r
300\r
301/**\r
302 SMI handler for BSP.\r
303\r
304 @param CpuIndex BSP processor Index\r
305 @param SyncMode SMM MP sync mode\r
306\r
307**/\r
308VOID\r
309BSPHandler (\r
310 IN UINTN CpuIndex,\r
311 IN SMM_CPU_SYNC_MODE SyncMode\r
312 )\r
313{\r
314 UINTN Index;\r
315 MTRR_SETTINGS Mtrrs;\r
316 UINTN ApCount;\r
317 BOOLEAN ClearTopLevelSmiResult;\r
318 UINTN PresentCount;\r
319\r
320 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);\r
321 ApCount = 0;\r
322\r
323 //\r
324 // Flag BSP's presence\r
325 //\r
326 *mSmmMpSyncData->InsideSmm = TRUE;\r
327\r
328 //\r
329 // Initialize Debug Agent to start source level debug in BSP handler\r
330 //\r
331 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);\r
332\r
333 //\r
334 // Mark this processor's presence\r
335 //\r
336 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
337\r
338 //\r
339 // Clear platform top level SMI status bit before calling SMI handlers. If\r
340 // we cleared it after SMI handlers are run, we would miss the SMI that\r
341 // occurs after SMI handlers are done and before SMI status bit is cleared.\r
342 //\r
343 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();\r
344 ASSERT (ClearTopLevelSmiResult == TRUE);\r
345\r
346 //\r
347 // Set running processor index\r
348 //\r
349 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;\r
350\r
351 //\r
352 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.\r
353 //\r
354 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
355\r
356 //\r
357 // Wait for APs to arrive\r
358 //\r
359 SmmWaitForApArrival();\r
360\r
361 //\r
362 // Lock the counter down and retrieve the number of APs\r
363 //\r
364 *mSmmMpSyncData->AllCpusInSync = TRUE;\r
365 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
366\r
367 //\r
368 // Wait for all APs to get ready for programming MTRRs\r
369 //\r
370 WaitForAllAPs (ApCount);\r
371\r
372 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
373 //\r
374 // Signal all APs it's time for backup MTRRs\r
375 //\r
376 ReleaseAllAPs ();\r
377\r
378 //\r
379 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at\r
380 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set\r
381 // to a large enough value to avoid this situation.\r
382 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.\r
383 // We do the backup first and then set MTRR to avoid race condition for threads\r
384 // in the same core.\r
385 //\r
386 MtrrGetAllMtrrs(&Mtrrs);\r
387\r
388 //\r
389 // Wait for all APs to complete their MTRR saving\r
390 //\r
391 WaitForAllAPs (ApCount);\r
392\r
393 //\r
394 // Let all processors program SMM MTRRs together\r
395 //\r
396 ReleaseAllAPs ();\r
397\r
398 //\r
399 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at\r
400 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set\r
401 // to a large enough value to avoid this situation.\r
402 //\r
403 ReplaceOSMtrrs (CpuIndex);\r
404\r
405 //\r
406 // Wait for all APs to complete their MTRR programming\r
407 //\r
408 WaitForAllAPs (ApCount);\r
409 }\r
410 }\r
411\r
412 //\r
413 // The BUSY lock is initialized to Acquired state\r
414 //\r
415 AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
416\r
417 //\r
418 // Perform the pre tasks\r
419 //\r
420 PerformPreTasks ();\r
421\r
422 //\r
423 // Invoke SMM Foundation EntryPoint with the processor information context.\r
424 //\r
425 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);\r
426\r
427 //\r
428 // Make sure all APs have completed their pending non-blocking tasks\r
429 //\r
430 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
431 if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {\r
432 AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
433 ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);\r
434 }\r
435 }\r
436\r
437 //\r
438 // Perform the remaining tasks\r
439 //\r
440 PerformRemainingTasks ();\r
441\r
442 //\r
443 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and\r
444 // make those APs exit SMI synchronously. APs which arrive later will be excluded and\r
445 // will run through freely.\r
446 //\r
447 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {\r
448\r
449 //\r
450 // Lock the counter down and retrieve the number of APs\r
451 //\r
452 *mSmmMpSyncData->AllCpusInSync = TRUE;\r
453 ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;\r
454 //\r
455 // Make sure all APs have their Present flag set\r
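    // (ApCount is the number of APs counted before the Counter was locked\r
    //  down; wait until the BSP plus all of those APs have set Present.)\r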
456 //\r
457 while (TRUE) {\r
458 PresentCount = 0;\r
459 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
460 if (*(mSmmMpSyncData->CpuData[Index].Present)) {\r
461 PresentCount ++;\r
462 }\r
463 }\r
464 if (PresentCount > ApCount) {\r
465 break;\r
466 }\r
467 }\r
468 }\r
469\r
470 //\r
471 // Notify all APs to exit\r
472 //\r
473 *mSmmMpSyncData->InsideSmm = FALSE;\r
474 ReleaseAllAPs ();\r
475\r
476 //\r
477 // Wait for all APs to complete their pending tasks\r
478 //\r
479 WaitForAllAPs (ApCount);\r
480\r
481 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
482 //\r
483 // Signal APs to restore MTRRs\r
484 //\r
485 ReleaseAllAPs ();\r
486\r
487 //\r
488 // Restore OS MTRRs\r
489 //\r
490 SmmCpuFeaturesReenableSmrr ();\r
491 MtrrSetAllMtrrs(&Mtrrs);\r
492\r
493 //\r
494 // Wait for all APs to complete MTRR programming\r
495 //\r
496 WaitForAllAPs (ApCount);\r
497 }\r
498\r
499 //\r
500 // Stop source level debug in BSP handler, the code below will not be\r
501 // debugged.\r
502 //\r
503 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);\r
504\r
505 //\r
506 // Signal APs to Reset states/semaphore for this processor\r
507 //\r
508 ReleaseAllAPs ();\r
509\r
510 //\r
511 // Perform pending operations for hot-plug\r
512 //\r
513 SmmCpuUpdate ();\r
514\r
515 //\r
516 // Clear the Present flag of BSP\r
517 //\r
518 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
519\r
520 //\r
521 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but\r
522 // WaitForAllAps does not depend on the Present flag.\r
523 //\r
524 WaitForAllAPs (ApCount);\r
525\r
526 //\r
527 // Reset BspIndex to -1, meaning BSP has not been elected.\r
528 //\r
529 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
530 mSmmMpSyncData->BspIndex = (UINT32)-1;\r
531 }\r
532\r
533 //\r
534 // Allow APs to check in from this point on\r
535 //\r
536 *mSmmMpSyncData->Counter = 0;\r
537 *mSmmMpSyncData->AllCpusInSync = FALSE;\r
538}\r
539\r
540/**\r
541 SMI handler for AP.\r
542\r
543 @param CpuIndex AP processor Index.\r
544 @param ValidSmi Indicates whether the current SMI is a valid SMI.\r
545 @param SyncMode SMM MP sync mode.\r
546\r
547**/\r
548VOID\r
549APHandler (\r
550 IN UINTN CpuIndex,\r
551 IN BOOLEAN ValidSmi,\r
552 IN SMM_CPU_SYNC_MODE SyncMode\r
553 )\r
554{\r
555 UINT64 Timer;\r
556 UINTN BspIndex;\r
557 MTRR_SETTINGS Mtrrs;\r
558\r
559 //\r
560 // Timeout BSP\r
561 //\r
562 for (Timer = StartSyncTimer ();\r
563 !IsSyncTimerTimeout (Timer) &&\r
564 !(*mSmmMpSyncData->InsideSmm);\r
565 ) {\r
566 CpuPause ();\r
567 }\r
568\r
569 if (!(*mSmmMpSyncData->InsideSmm)) {\r
570 //\r
571 // BSP timeout in the first round\r
572 //\r
573 if (mSmmMpSyncData->BspIndex != -1) {\r
574 //\r
575 // BSP Index is known\r
576 //\r
577 BspIndex = mSmmMpSyncData->BspIndex;\r
578 ASSERT (CpuIndex != BspIndex);\r
579\r
580 //\r
581 // Send SMI IPI to bring BSP in\r
582 //\r
583 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);\r
584\r
585 //\r
586 // Now clock BSP for the 2nd time\r
587 //\r
588 for (Timer = StartSyncTimer ();\r
589 !IsSyncTimerTimeout (Timer) &&\r
590 !(*mSmmMpSyncData->InsideSmm);\r
591 ) {\r
592 CpuPause ();\r
593 }\r
594\r
595 if (!(*mSmmMpSyncData->InsideSmm)) {\r
596 //\r
597 // Give up since BSP is unable to enter SMM\r
598 // and signal the completion of this AP\r
599 WaitForSemaphore (mSmmMpSyncData->Counter);\r
600 return;\r
601 }\r
602 } else {\r
603 //\r
604 // Don't know BSP index. Give up without sending IPI to BSP.\r
605 //\r
606 WaitForSemaphore (mSmmMpSyncData->Counter);\r
607 return;\r
608 }\r
609 }\r
610\r
611 //\r
612 // BSP is available\r
613 //\r
614 BspIndex = mSmmMpSyncData->BspIndex;\r
615 ASSERT (CpuIndex != BspIndex);\r
616\r
617 //\r
618 // Mark this processor's presence\r
619 //\r
620 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;\r
621\r
622 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
623 //\r
624 // Notify BSP of arrival at this point\r
625 //\r
626 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
627 }\r
628\r
629 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
630 //\r
631 // Wait for the signal from BSP to backup MTRRs\r
632 //\r
633 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
634\r
635 //\r
636 // Backup OS MTRRs\r
637 //\r
638 MtrrGetAllMtrrs(&Mtrrs);\r
639\r
640 //\r
641 // Signal BSP the completion of this AP\r
642 //\r
643 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
644\r
645 //\r
646 // Wait for BSP's signal to program MTRRs\r
647 //\r
648 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
649\r
650 //\r
651 // Replace OS MTRRs with SMI MTRRs\r
652 //\r
653 ReplaceOSMtrrs (CpuIndex);\r
654\r
655 //\r
656 // Signal BSP the completion of this AP\r
657 //\r
658 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
659 }\r
660\r
661 while (TRUE) {\r
662 //\r
663 // Wait for something to happen\r
664 //\r
665 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
666\r
667 //\r
668 // Check if BSP wants to exit SMM\r
669 //\r
670 if (!(*mSmmMpSyncData->InsideSmm)) {\r
671 break;\r
672 }\r
673\r
674 //\r
675 // BUSY should be acquired by SmmStartupThisAp()\r
676 //\r
677 ASSERT (\r
678 !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)\r
679 );\r
680\r
681 //\r
682 // Invoke the scheduled procedure\r
683 //\r
684 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (\r
685 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter\r
686 );\r
687\r
688 //\r
689 // Release BUSY\r
690 //\r
691 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
692 }\r
693\r
694 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
695 //\r
696 // Notify BSP the readiness of this AP to program MTRRs\r
697 //\r
698 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
699\r
700 //\r
701 // Wait for the signal from BSP to program MTRRs\r
702 //\r
703 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
704\r
705 //\r
706 // Restore OS MTRRs\r
707 //\r
708 SmmCpuFeaturesReenableSmrr ();\r
709 MtrrSetAllMtrrs(&Mtrrs);\r
710 }\r
711\r
712 //\r
713 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor\r
714 //\r
715 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
716\r
717 //\r
718 // Wait for the signal from BSP to Reset states/semaphore for this processor\r
719 //\r
720 WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
721\r
722 //\r
723 // Reset states/semaphore for this processor\r
724 //\r
725 *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;\r
726\r
727 //\r
728 // Notify BSP the readiness of this AP to exit SMM\r
729 //\r
730 ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);\r
731\r
732}\r
733\r
734/**\r
735 Create 4G PageTable in SMRAM.\r
736\r
737 @param[in] Is32BitPageTable Whether the page table is 32-bit PAE\r
738 @return PageTable Address\r
739\r
740**/\r
741UINT32\r
742Gen4GPageTable (\r
743 IN BOOLEAN Is32BitPageTable\r
744 )\r
745{\r
746 VOID *PageTable;\r
747 UINTN Index;\r
748 UINT64 *Pte;\r
749 UINTN PagesNeeded;\r
750 UINTN Low2MBoundary;\r
751 UINTN High2MBoundary;\r
752 UINTN Pages;\r
753 UINTN GuardPage;\r
754 UINT64 *Pdpte;\r
755 UINTN PageIndex;\r
756 UINTN PageAddress;\r
757\r
758 Low2MBoundary = 0;\r
759 High2MBoundary = 0;\r
760 PagesNeeded = 0;\r
761 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
762 //\r
763 // Add one more page for known good stack, then find the lower 2MB aligned address.\r
764 //\r
765 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);\r
766 //\r
767 // Add two more pages for known good stack and stack guard page,\r
768 // then find the lower 2MB aligned address.\r
769 //\r
770 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);\r
771 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;\r
772 }\r
773 //\r
774 // Allocate the page table\r
775 //\r
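  // Layout: page 0 holds the 4 PDPTEs, pages 1-4 are page directories that\r
  // identity-map 4GB with 2MB pages, and the remaining PagesNeeded pages are\r
  // 4KB page tables used to split the 2MB ranges containing the SMM stacks so\r
  // that each stack guard page can be marked not-present.\r
  //\r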
776 PageTable = AllocatePageTableMemory (5 + PagesNeeded);\r
777 ASSERT (PageTable != NULL);\r
778\r
779 PageTable = (VOID *)((UINTN)PageTable);\r
780 Pte = (UINT64*)PageTable;\r
781\r
782 //\r
783 // Zero out all page table entries first\r
784 //\r
785 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));\r
786\r
787 //\r
788 // Set Page Directory Pointers\r
789 //\r
790 for (Index = 0; Index < 4; Index++) {\r
791 Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
792 }\r
793 Pte += EFI_PAGE_SIZE / sizeof (*Pte);\r
794\r
795 //\r
796 // Fill in Page Directory Entries\r
797 //\r
798 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {\r
799 Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
800 }\r
801\r
802 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
803 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);\r
804 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;\r
805 Pdpte = (UINT64*)PageTable;\r
806 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {\r
807 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));\r
808 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;\r
809 //\r
810 // Fill in Page Table Entries\r
811 //\r
812 Pte = (UINT64*)Pages;\r
813 PageAddress = PageIndex;\r
814 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
815 if (PageAddress == GuardPage) {\r
816 //\r
817 // Mark the guard page as non-present\r
818 //\r
819 Pte[Index] = PageAddress;\r
820 GuardPage += mSmmStackSize;\r
821 if (GuardPage > mSmmStackArrayEnd) {\r
822 GuardPage = 0;\r
823 }\r
824 } else {\r
825 Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;\r
826 }\r
827 PageAddress+= EFI_PAGE_SIZE;\r
828 }\r
829 Pages += EFI_PAGE_SIZE;\r
830 }\r
831 }\r
832\r
833 return (UINT32)(UINTN)PageTable;\r
834}\r
835\r
836/**\r
837 Set memory cacheability.\r
838\r
839 @param PageTable PageTable Address\r
840 @param Address Memory Address to change cacheability\r
841 @param Cacheability Cacheability to set\r
842\r
843**/\r
844VOID\r
845SetCacheability (\r
846 IN UINT64 *PageTable,\r
847 IN UINTN Address,\r
848 IN UINT8 Cacheability\r
849 )\r
850{\r
851 UINTN PTIndex;\r
852 VOID *NewPageTableAddress;\r
853 UINT64 *NewPageTable;\r
854 UINTN Index;\r
855\r
856 ASSERT ((Address & EFI_PAGE_MASK) == 0);\r
857\r
858 if (sizeof (UINTN) == sizeof (UINT64)) {\r
859 PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;\r
860 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
861 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
862 }\r
863\r
864 PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;\r
865 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
866 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
867\r
868 //\r
869 // A perfect implementation should check the original cacheability with the\r
870 // one being set, and break a 2M page entry into pieces only when they\r
871 // disagreed.\r
872 //\r
873 PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;\r
874 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {\r
875 //\r
876 // Allocate a page from SMRAM\r
877 //\r
878 NewPageTableAddress = AllocatePageTableMemory (1);\r
879 ASSERT (NewPageTableAddress != NULL);\r
880\r
881 NewPageTable = (UINT64 *)NewPageTableAddress;\r
882\r
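    //\r
    // Split the 2MB mapping into 512 4KB entries that inherit its attributes,\r
    // moving the PAT flag from its 2MB position to its 4KB position.\r
    //\r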
883 for (Index = 0; Index < 0x200; Index++) {\r
884 NewPageTable[Index] = PageTable[PTIndex];\r
885 if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {\r
886 NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);\r
887 NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;\r
888 }\r
889 NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);\r
890 }\r
891\r
892 PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;\r
893 }\r
894\r
895 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
896 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
897\r
898 PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;\r
899 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
900 PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));\r
901 PageTable[PTIndex] |= (UINT64)Cacheability;\r
902}\r
903\r
904/**\r
905 Schedule a procedure to run on the specified CPU.\r
906\r
907 @param[in] Procedure The address of the procedure to run\r
908 @param[in] CpuIndex Target CPU Index\r
909 @param[in, out] ProcArguments The parameter to pass to the procedure\r
910 @param[in] BlockingMode Startup AP in blocking mode or not\r
911\r
912 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
913 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP\r
914 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
915 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
916 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
917\r
918**/\r
919EFI_STATUS\r
920InternalSmmStartupThisAp (\r
921 IN EFI_AP_PROCEDURE Procedure,\r
922 IN UINTN CpuIndex,\r
923 IN OUT VOID *ProcArguments OPTIONAL,\r
924 IN BOOLEAN BlockingMode\r
925 )\r
926{\r
927 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {\r
928 DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
929 return EFI_INVALID_PARAMETER;\r
930 }\r
931 if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {\r
932 DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));\r
933 return EFI_INVALID_PARAMETER;\r
934 }\r
935 if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {\r
936 if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {\r
937 DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));\r
938 }\r
939 return EFI_INVALID_PARAMETER;\r
940 }\r
941 if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {\r
942 if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {\r
943 DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));\r
944 }\r
945 return EFI_INVALID_PARAMETER;\r
946 }\r
947\r
948 if (BlockingMode) {\r
949 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
950 } else {\r
951 if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {\r
952 DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));\r
953 return EFI_INVALID_PARAMETER;\r
954 }\r
955 }\r
956\r
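  //\r
  // Post the procedure and wake the target AP through its Run semaphore; in\r
  // blocking mode, wait until the AP releases its Busy lock after the\r
  // procedure returns.\r
  //\r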
957 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;\r
958 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;\r
959 ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);\r
960\r
961 if (BlockingMode) {\r
962 AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
963 ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
964 }\r
965 return EFI_SUCCESS;\r
966}\r
967\r
968/**\r
969 Schedule a procedure to run on the specified CPU in blocking mode.\r
970\r
971 @param[in] Procedure The address of the procedure to run\r
972 @param[in] CpuIndex Target CPU Index\r
973 @param[in, out] ProcArguments The parameter to pass to the procedure\r
974\r
975 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
976 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP\r
977 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
978 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
979 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
980\r
981**/\r
982EFI_STATUS\r
983EFIAPI\r
984SmmBlockingStartupThisAp (\r
985 IN EFI_AP_PROCEDURE Procedure,\r
986 IN UINTN CpuIndex,\r
987 IN OUT VOID *ProcArguments OPTIONAL\r
988 )\r
989{\r
990 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);\r
991}\r
992\r
993/**\r
994 Schedule a procedure to run on the specified CPU.\r
995\r
996 @param Procedure The address of the procedure to run\r
997 @param CpuIndex Target CPU Index\r
998 @param ProcArguments The parameter to pass to the procedure\r
999\r
1000 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
1001 @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP\r
1002 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
1003 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
1004 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
1005\r
1006**/\r
1007EFI_STATUS\r
1008EFIAPI\r
1009SmmStartupThisAp (\r
1010 IN EFI_AP_PROCEDURE Procedure,\r
1011 IN UINTN CpuIndex,\r
1012 IN OUT VOID *ProcArguments OPTIONAL\r
1013 )\r
1014{\r
1015 return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));\r
1016}\r
1017\r
1018/**\r
1019 This function sets DR6 & DR7 according to SMM save state, before running SMM C code.\r
1020 They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.\r
1021\r
1022 NOTE: It might not be appropriate at runtime since it might\r
1023 conflict with OS debugging facilities. Turn them off in RELEASE.\r
1024\r
1025 @param CpuIndex CPU Index\r
1026\r
1027**/\r
1028VOID\r
1029EFIAPI\r
1030CpuSmmDebugEntry (\r
1031 IN UINTN CpuIndex\r
1032 )\r
1033{\r
1034 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
1035 \r
1036 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
1037 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
1038 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
1039 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1040 AsmWriteDr6 (CpuSaveState->x86._DR6);\r
1041 AsmWriteDr7 (CpuSaveState->x86._DR7);\r
1042 } else {\r
1043 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);\r
1044 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);\r
1045 }\r
1046 }\r
1047}\r
1048\r
1049/**\r
1050 This function restores DR6 & DR7 to SMM save state.\r
1051\r
1052 NOTE: It might not be appropriate at runtime since it might\r
1053 conflict with OS debugging facilities. Turn them off in RELEASE.\r
1054\r
1055 @param CpuIndex CPU Index\r
1056\r
1057**/\r
1058VOID\r
1059EFIAPI\r
1060CpuSmmDebugExit (\r
1061 IN UINTN CpuIndex\r
1062 )\r
1063{\r
1064 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
1065\r
1066 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
1067 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
1068 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
1069 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
1070 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
1071 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();\r
1072 } else {\r
1073 CpuSaveState->x64._DR7 = AsmReadDr7 ();\r
1074 CpuSaveState->x64._DR6 = AsmReadDr6 ();\r
1075 }\r
1076 }\r
1077}\r
1078\r
1079/**\r
1080 C function for SMI entry, each processor comes here upon SMI trigger.\r
1081\r
1082 @param CpuIndex CPU Index\r
1083\r
1084**/\r
1085VOID\r
1086EFIAPI\r
1087SmiRendezvous (\r
1088 IN UINTN CpuIndex\r
1089 )\r
1090{\r
1091 EFI_STATUS Status;\r
1092 BOOLEAN ValidSmi;\r
1093 BOOLEAN IsBsp;\r
1094 BOOLEAN BspInProgress;\r
1095 UINTN Index;\r
1096 UINTN Cr2;\r
1097\r
1098 ASSERT(CpuIndex < mMaxNumberOfCpus);\r
1099\r
1100 //\r
1101 // Save Cr2 because Page Fault exception in SMM may override its value\r
1102 //\r
1103 Cr2 = AsmReadCr2 ();\r
1104\r
1105 //\r
1106 // Perform CPU specific entry hooks\r
1107 //\r
1108 SmmCpuFeaturesRendezvousEntry (CpuIndex);\r
1109\r
1110 //\r
1111 // Determine if this is a valid SMI\r
1112 //\r
1113 ValidSmi = PlatformValidSmi();\r
1114\r
1115 //\r
1116 // Determine if the BSP is already in progress. Note this must be checked after\r
1117 // ValidSmi because BSP may clear a valid SMI source after checking in.\r
1118 //\r
1119 BspInProgress = *mSmmMpSyncData->InsideSmm;\r
1120\r
1121 if (!BspInProgress && !ValidSmi) {\r
1122 //\r
1123 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not\r
1124 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI\r
1125 // status had been cleared by BSP and an existing SMI run has almost ended. (Note\r
1126 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there\r
1127 // is nothing we need to do.\r
1128 //\r
1129 goto Exit;\r
1130 } else {\r
1131 //\r
1132 // Signal presence of this processor\r
1133 //\r
1134 if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {\r
1135 //\r
1136 // BSP has already ended the synchronization, so QUIT!!!\r
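      // (ReleaseSemaphore() returns 0 only when the Counter has been locked\r
      //  down to (UINT32)-1 by the BSP, so this late arrival is not counted.)\r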
1137 //\r
1138\r
1139 //\r
1140 // Wait for BSP's signal to finish SMI\r
1141 //\r
1142 while (*mSmmMpSyncData->AllCpusInSync) {\r
1143 CpuPause ();\r
1144 }\r
1145 goto Exit;\r
1146 } else {\r
1147\r
1148 //\r
1149 // The BUSY lock is initialized to Released state.\r
1150 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.\r
1151 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately\r
1152 // after AP's present flag is detected.\r
1153 //\r
1154 InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
1155 }\r
1156\r
1157 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1158 ActivateSmmProfile (CpuIndex);\r
1159 }\r
1160\r
1161 if (BspInProgress) {\r
1162 //\r
1163 // BSP has been elected. Follow AP path, regardless of ValidSmi flag\r
1164 // as BSP may have cleared the SMI status\r
1165 //\r
1166 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1167 } else {\r
1168 //\r
1169 // We have a valid SMI\r
1170 //\r
1171\r
1172 //\r
1173 // Elect BSP\r
1174 //\r
1175 IsBsp = FALSE;\r
1176 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1177 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {\r
1178 //\r
1179 // Call platform hook to do BSP election\r
1180 //\r
1181 Status = PlatformSmmBspElection (&IsBsp);\r
1182 if (EFI_SUCCESS == Status) {\r
1183 //\r
1184 // Platform hook determines successfully\r
1185 //\r
1186 if (IsBsp) {\r
1187 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;\r
1188 }\r
1189 } else {\r
1190 //\r
1191 // Platform hook fails to determine, use default BSP election method\r
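            // (The first CPU to swap BspIndex from -1 to its own index via the\r
            //  interlocked compare exchange becomes the BSP for this SMI.)\r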
1192 //\r
1193 InterlockedCompareExchange32 (\r
1194 (UINT32*)&mSmmMpSyncData->BspIndex,\r
1195 (UINT32)-1,\r
1196 (UINT32)CpuIndex\r
1197 );\r
1198 }\r
1199 }\r
1200 }\r
1201\r
1202 //\r
1203 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP\r
1204 //\r
1205 if (mSmmMpSyncData->BspIndex == CpuIndex) {\r
1206\r
1207 //\r
1208 // Clear last request for SwitchBsp.\r
1209 //\r
1210 if (mSmmMpSyncData->SwitchBsp) {\r
1211 mSmmMpSyncData->SwitchBsp = FALSE;\r
1212 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1213 mSmmMpSyncData->CandidateBsp[Index] = FALSE;\r
1214 }\r
1215 }\r
1216\r
1217 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1218 SmmProfileRecordSmiNum ();\r
1219 }\r
1220\r
1221 //\r
1222 // BSP Handler is always called with a ValidSmi == TRUE\r
1223 //\r
1224 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);\r
1225 } else {\r
1226 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1227 }\r
1228 }\r
1229\r
1230 ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);\r
1231\r
1232 //\r
1233 // Wait for BSP's signal to exit SMI\r
1234 //\r
1235 while (*mSmmMpSyncData->AllCpusInSync) {\r
1236 CpuPause ();\r
1237 }\r
1238 }\r
1239\r
1240Exit:\r
1241 SmmCpuFeaturesRendezvousExit (CpuIndex);\r
1242 //\r
1243 // Restore Cr2\r
1244 //\r
1245 AsmWriteCr2 (Cr2);\r
1246}\r
1247\r
1248/**\r
1249 Allocate buffer for all semaphores and spin locks.\r
1250\r
1251**/\r
1252VOID\r
1253InitializeSmmCpuSemaphores (\r
1254 VOID\r
1255 )\r
1256{\r
1257 UINTN ProcessorCount;\r
1258 UINTN TotalSize;\r
1259 UINTN GlobalSemaphoresSize;\r
1260 UINTN CpuSemaphoresSize;\r
1261 UINTN MsrSemaphoreSize;\r
1262 UINTN SemaphoreSize;\r
1263 UINTN Pages;\r
1264 UINTN *SemaphoreBlock;\r
1265 UINTN SemaphoreAddr;\r
1266\r
1267 SemaphoreSize = GetSpinLockProperties ();\r
1268 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1269 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
1270 CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;\r
1271 MsrSemaphoreSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;\r
1272 TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;\r
1273 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
1274 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
1275 Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
1276 SemaphoreBlock = AllocatePages (Pages);\r
1277 ASSERT (SemaphoreBlock != NULL);\r
1278 ZeroMem (SemaphoreBlock, TotalSize);\r
1279\r
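  //\r
  // Carve the zeroed block into SemaphoreSize-aligned slots: the global\r
  // semaphores first, then the per-CPU Busy/Run/Present arrays, and finally\r
  // the MSR spin locks.\r
  //\r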
1280 SemaphoreAddr = (UINTN)SemaphoreBlock;\r
1281 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;\r
1282 SemaphoreAddr += SemaphoreSize;\r
1283 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;\r
1284 SemaphoreAddr += SemaphoreSize;\r
1285 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;\r
1286 SemaphoreAddr += SemaphoreSize;\r
1287 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;\r
1288 SemaphoreAddr += SemaphoreSize;\r
1289 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
1290 = (SPIN_LOCK *)SemaphoreAddr;\r
1291 SemaphoreAddr += SemaphoreSize;\r
1292 mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock\r
1293 = (SPIN_LOCK *)SemaphoreAddr;\r
1294\r
1295 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;\r
1296 mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;\r
1297 SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
1298 mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;\r
1299 SemaphoreAddr += ProcessorCount * SemaphoreSize;\r
1300 mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;\r
1301\r
1302 SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;\r
1303 mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;\r
1304 mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =\r
1305 ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;\r
1306 ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);\r
1307\r
1308 mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;\r
1309 mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;\r
1310 mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;\r
1311\r
1312 mSemaphoreSize = SemaphoreSize;\r
1313}\r
1314\r
1315/**\r
1316 Initialize un-cacheable data.\r
1317\r
1318**/\r
1319VOID\r
1320EFIAPI\r
1321InitializeMpSyncData (\r
1322 VOID\r
1323 )\r
1324{\r
1325 UINTN CpuIndex;\r
1326\r
1327 if (mSmmMpSyncData != NULL) {\r
1328 //\r
1329 // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one\r
1330 // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.\r
1331 //\r
1332 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);\r
1333 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
1334 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
1335 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1336 //\r
1337 // Enable BSP election by setting BspIndex to -1\r
1338 //\r
1339 mSmmMpSyncData->BspIndex = (UINT32)-1;\r
1340 }\r
1341 mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);\r
1342\r
1343 mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;\r
1344 mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;\r
1345 mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;\r
1346 ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&\r
1347 mSmmMpSyncData->AllCpusInSync != NULL);\r
1348 *mSmmMpSyncData->Counter = 0;\r
1349 *mSmmMpSyncData->InsideSmm = FALSE;\r
1350 *mSmmMpSyncData->AllCpusInSync = FALSE;\r
1351\r
1352 for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {\r
1353 mSmmMpSyncData->CpuData[CpuIndex].Busy =\r
1354 (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);\r
1355 mSmmMpSyncData->CpuData[CpuIndex].Run =\r
1356 (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);\r
1357 mSmmMpSyncData->CpuData[CpuIndex].Present =\r
1358 (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);\r
1359 }\r
1360 }\r
1361}\r
1362\r
1363/**\r
1364 Initialize global data for MP synchronization.\r
1365\r
1366 @param Stacks Base address of SMI stack buffer for all processors.\r
1367 @param StackSize Stack size for each processor in SMM.\r
1368\r
1369**/\r
1370UINT32\r
1371InitializeMpServiceData (\r
1372 IN VOID *Stacks,\r
1373 IN UINTN StackSize\r
1374 )\r
1375{\r
1376 UINT32 Cr3;\r
1377 UINTN Index;\r
1378 MTRR_SETTINGS *Mtrr;\r
1379 PROCESSOR_SMM_DESCRIPTOR *Psd;\r
1380 UINT8 *GdtTssTables;\r
1381 UINTN GdtTableStepSize;\r
1382\r
1383 //\r
1384 // Allocate memory for all locks and semaphores\r
1385 //\r
1386 InitializeSmmCpuSemaphores ();\r
1387\r
1388 //\r
1389 // Initialize mSmmMpSyncData\r
1390 //\r
1391 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
1392 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1393 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
1394 ASSERT (mSmmMpSyncData != NULL);\r
1395 InitializeMpSyncData ();\r
1396\r
1397 //\r
1398 // Initialize physical address mask\r
1399 // NOTE: Physical memory above virtual address limit is not supported !!!\r
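  // CPUID leaf 0x80000008 (EAX bits [7:0]) reports the physical address\r
  // width; the mask below is capped at 48 bits and kept page-aligned.\r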
1400 //\r
1401 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);\r
1402 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;\r
1403 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;\r
1404\r
1405 //\r
1406 // Create page tables\r
1407 //\r
1408 Cr3 = SmmInitPageTable ();\r
1409\r
1410 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
1411\r
1412 //\r
1413 // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU\r
1414 //\r
1415 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1416 Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);\r
1417 CopyMem (Psd, &gcPsd, sizeof (gcPsd));\r
1418 Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
1419 Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
1420\r
1421 //\r
1422 // Install SMI handler\r
1423 //\r
1424 InstallSmiHandler (\r
1425 Index,\r
1426 (UINT32)mCpuHotPlugData.SmBase[Index],\r
1427 (VOID*)((UINTN)Stacks + (StackSize * Index)),\r
1428 StackSize,\r
1429 (UINTN)Psd->SmmGdtPtr,\r
1430 Psd->SmmGdtSize,\r
1431 gcSmiIdtr.Base,\r
1432 gcSmiIdtr.Limit + 1,\r
1433 Cr3\r
1434 );\r
1435 }\r
1436\r
1437 //\r
1438 // Record current MTRR settings\r
1439 //\r
1440 ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));\r
1441 Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;\r
1442 MtrrGetAllMtrrs (Mtrr);\r
1443\r
1444 return Cr3;\r
1445}\r
1446\r
1447/**\r
1448\r
1449 Register the SMM Foundation entry point.\r
1450\r
1451 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance\r
1452 @param SmmEntryPoint SMM Foundation EntryPoint\r
1453\r
1454 @retval EFI_SUCCESS SMM foundation entry point registered successfully\r
1455\r
1456**/\r
1457EFI_STATUS\r
1458EFIAPI\r
1459RegisterSmmEntry (\r
1460 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,\r
1461 IN EFI_SMM_ENTRY_POINT SmmEntryPoint\r
1462 )\r
1463{\r
1464 //\r
1465 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.\r
1466 //\r
1467 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;\r
1468 return EFI_SUCCESS;\r
1469}\r