/** @file
SMM MP service implementation

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
UINT64                       gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
UINT64                       gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA  *mSmmMpSyncData = NULL;
UINTN                        mSmmMpSyncDataSize;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}

/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32                            Value;

  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}
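
//
// Usage note: mSmmMpSyncData->Counter is used as an arrival counter that each
// CPU increments via ReleaseSemaphore() when it checks in, the per-CPU Run
// fields are run/continue signals between the BSP and the APs, and
// LockdownSemaphore() sets Counter to -1 so that any CPU arriving afterwards
// sees ReleaseSemaphore() return 0 and knows the current synchronization
// window is already closed.
//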

/**
  Waits for all APs to perform an atomic compare exchange operation to
  release the semaphore.

  @param   NumberOfAPs      Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN                             BspIndex;

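  //
  // Each AP signals completion by releasing the BSP's Run semaphore (see
  // APHandler), so waiting on that semaphore NumberOfAPs times synchronizes
  // the BSP with all participating APs.
  //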
  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN                             Index;
  UINTN                             BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) {
      ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs have checked in.
  @retval   FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                             Index;
  SMM_CPU_DATA_BLOCK                *CpuData;
  EFI_PROCESSOR_INFORMATION         *ProcessorInfo;

  ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}

/**
  Given timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering
  SMM, except SMI disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64                            Timer;
  UINTN                             Index;

  ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they
  //    enter SMM immediately without executing instructions in normal mode. Note the traditional flow requires that no APs do normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of sending SMI IPIs, (spurious) SMIs may occur after this SMM run.
  // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPIs to SMI-disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
         ) {
      CpuPause ();
    }
  }

  return;
}

/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  PROCESSOR_SMM_DESCRIPTOR       *Psd;
  UINT64                         *SmiMtrrs;
  MTRR_SETTINGS                  *BiosMtrr;

  Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
  SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;

  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
  MtrrSetAllMtrrs(BiosMtrr);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN                             Index;
  MTRR_SETTINGS                     Mtrrs;
  UINTN                             ApCount;
  BOOLEAN                           ClearTopLevelSmiResult;
  UINTN                             PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

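    //
    // When SMM MTRR programming is needed, the BSP and the APs move below in
    // lock-step through two phases, each bracketed by ReleaseAllAPs() /
    // WaitForAllAPs() pairs: first every CPU saves the OS MTRRs, then every
    // CPU loads the SMM MTRRs, so no CPU runs ahead of the others.
    //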
    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {
      AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (mSmmMpSyncData->CpuData[Index].Present) {
          PresentCount ++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  mSmmMpSyncData->Counter = 0;
  mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI or not.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
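  //
  // Overall AP flow: wait (with timeout) for the BSP to flag InsideSmm, check
  // in by setting Present, optionally take part in the MTRR save/program
  // rendezvous, then loop servicing procedures scheduled via
  // SmmStartupThisAp() until the BSP clears InsideSmm, and finally walk the
  // exit handshake with the BSP.
  //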
  UINT64                            Timer;
  UINTN                             BspIndex;
  MTRR_SETTINGS                     Mtrrs;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !mSmmMpSyncData->InsideSmm;
       ) {
    CpuPause ();
  }

  if (!mSmmMpSyncData->InsideSmm) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !mSmmMpSyncData->InsideSmm;
           ) {
        CpuPause ();
      }

      if (!mSmmMpSyncData->InsideSmm) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (&mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (&mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!mSmmMpSyncData->InsideSmm) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);

}

/**
  Create 4G PageTable in SMRAM.

  @param          ExtraPages       Number of additional pages to allocate besides the pages for 4G memory
  @param          Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      UINTN                     ExtraPages,
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

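  //
  // Layout of the allocation built below: ExtraPages reserved for the caller,
  // then one page of PDPTEs (4 entries used), then 4 page directory pages that
  // identity-map 0-4GB with 2MB pages. When the SMM stack guard is enabled,
  // PagesNeeded extra page table pages are added so the 2MB regions covering
  // the SMM stacks can be split into 4KB pages and each guard page can be
  // marked not-present.
  //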
  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress+= EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}

/**
  Set memory cache ability.

  @param    PageTable              PageTable Address
  @param    Address                Memory Address to change cache ability
  @param    Cacheability           Cache ability to set

**/
VOID
SetCacheability (
  IN      UINT64                    *PageTable,
  IN      UINTN                     Address,
  IN      UINT8                     Cacheability
  )
{
  UINTN   PTIndex;
  VOID    *NewPageTableAddress;
  UINT64  *NewPageTable;
  UINTN   Index;

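  //
  // The page tables are walked one level at a time: PML4 (address bits 47:39,
  // 64-bit build only), then PDPT (bits 38:30), then PD (bits 29:21), then PT
  // (bits 20:12). If the address is currently mapped by a 2MB page (IA32_PG_PS
  // set), that entry is first split into a freshly allocated 4KB page table so
  // only the requested page has its cacheability bits changed.
  //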
  ASSERT ((Address & EFI_PAGE_MASK) == 0);

  if (sizeof (UINTN) == sizeof (UINT64)) {
    PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
    ASSERT (PageTable[PTIndex] & IA32_PG_P);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
  }

  PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  //
  // A perfect implementation should check the original cacheability with the
  // one being set, and break a 2M page entry into pieces only when they
  // disagreed.
  //
  PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Allocate a page from SMRAM
    //
    NewPageTableAddress = AllocatePageTableMemory (1);
    ASSERT (NewPageTableAddress != NULL);

    NewPageTable = (UINT64 *)NewPageTableAddress;

    for (Index = 0; Index < 0x200; Index++) {
      NewPageTable[Index] = PageTable[PTIndex];
      if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
        NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
        NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
      }
      NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
    }

    PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
  }

  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
  PageTable[PTIndex] |= (UINT64)Cacheability;
}

/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex is not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifies the BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
      CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
      !mSmmMpSyncData->CpuData[CpuIndex].Present ||
      gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
      !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
    return EFI_INVALID_PARAMETER;
  }

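  //
  // Record the procedure and its argument, then release the target AP's Run
  // semaphore. The AP, spinning in APHandler(), picks up the request, runs the
  // procedure while holding its Busy lock, and releases Busy when done, which
  // is what the optional blocking wait below synchronizes on.
  //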
  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
    AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode first.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE build.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appropriate at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE build.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}

/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS        Status;
  BOOLEAN           ValidSmi;
  BOOLEAN           IsBsp;
  BOOLEAN           BspInProgress;
  UINTN             Index;
  UINTN             Cr2;
  BOOLEAN           XdDisableFlag;

  //
  // Save Cr2 because Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

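    //
    // If the OS has set the XD Disable bit (BIT34) in the IA32_MISC_ENABLE
    // MSR, the Execute Disable feature cannot be used inside SMM. Clear the
    // bit here (remembering in XdDisableFlag that it was set) so ActivateXd()
    // can enable XD for SMM, and set it back before resuming to the OS (see
    // the "Restore XD" step near the end of this function).
    //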
    //
    // Try to enable XD
    //
    XdDisableFlag = FALSE;
    if (mXdSupported) {
      if ((AsmReadMsr64 (MSR_IA32_MISC_ENABLE) & B_XD_DISABLE_BIT) != 0) {
        XdDisableFlag = TRUE;
        AsmMsrAnd64 (MSR_IA32_MISC_ENABLE, ~B_XD_DISABLE_BIT);
      }
      ActivateXd ();
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }

    //
    // Restore XD
    //
    if (XdDisableFlag) {
      AsmMsrOr64 (MSR_IA32_MISC_ENABLE, B_XD_DISABLE_BIT);
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}


/**
  Initialize un-cacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
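  //
  // mSmmMpSyncData is a single contiguous allocation: the
  // SMM_DISPATCHER_MP_SYNC_DATA header is followed by the per-CPU
  // SMM_CPU_DATA_BLOCK array, which is in turn followed by the per-CPU
  // CandidateBsp BOOLEAN array (see the size calculation in
  // InitializeMpServiceData()).
  //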
  if (mSmmMpSyncData != NULL) {
    ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

  @return The CR3 value (SMM page table base address) to be used on SMI entry.

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  MTRR_SETTINGS             *Mtrr;
  PROCESSOR_SMM_DESCRIPTOR  *Psd;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;
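  //
  // CPUID leaf 0x80000008 reports the physical address width in EAX bits 7:0;
  // the resulting gPhyMask keeps only the physical address field of a page
  // table entry (capped at 48 bits, with the low 12 attribute bits cleared).
  //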

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
    CopyMem (Psd, &gcPsd, sizeof (gcPsd));
    Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
    Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;

    //
    // Install SMI handler
    //
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)Psd->SmmGdtPtr,
      Psd->SmmGdtSize,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  InitializeMpSyncData ();

  //
  // Record current MTRR settings
  //
  ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
  Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
  MtrrGetAllMtrrs (Mtrr);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       The SMM Foundation entry point was successfully registered

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}