UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c

1/** @file\r
2SMM MP service implementation\r
3\r
4Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
17//\r
18// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)\r
19//\r
20UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];\r
21UINT64 gPhyMask;\r
22SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;\r
23UINTN mSmmMpSyncDataSize;\r
24SMM_CPU_SEMAPHORES mSmmCpuSemaphores;\r
25UINTN mSemaphoreSize;\r
26\r
27/**\r
28 Performs an atomic compare exchange operation to get semaphore.\r
29 The compare exchange operation must be performed using\r
30 MP safe mechanisms.\r
31\r
32 @param Sem IN: 32-bit unsigned integer\r
33 OUT: original integer - 1\r
34 @return Original integer - 1\r
35\r
36**/\r
37UINT32\r
38WaitForSemaphore (\r
39 IN OUT volatile UINT32 *Sem\r
40 )\r
41{\r
42 UINT32 Value;\r
43\r
44 do {\r
45 Value = *Sem;\r
46 } while (Value == 0 ||\r
47 InterlockedCompareExchange32 (\r
48 (UINT32*)Sem,\r
49 Value,\r
50 Value - 1\r
51 ) != Value);\r
52 return Value - 1;\r
53}\r
54\r
55\r
56/**\r
57 Performs an atomic compare exchange operation to release semaphore.\r
58 The compare exchange operation must be performed using\r
59 MP safe mechanisms.\r
60\r
61 @param Sem IN: 32-bit unsigned integer\r
62 OUT: original integer + 1\r
63 @return Original integer + 1\r
64\r
65**/\r
66UINT32\r
67ReleaseSemaphore (\r
68 IN OUT volatile UINT32 *Sem\r
69 )\r
70{\r
71 UINT32 Value;\r
72\r
73 do {\r
74 Value = *Sem;\r
75 } while (Value + 1 != 0 &&\r
76 InterlockedCompareExchange32 (\r
77 (UINT32*)Sem,\r
78 Value,\r
79 Value + 1\r
80 ) != Value);\r
81 return Value + 1;\r
82}\r
83\r
84/**\r
85 Performs an atomic compare exchange operation to lock semaphore.\r
86 The compare exchange operation must be performed using\r
87 MP safe mechanisms.\r
88\r
89 @param Sem IN: 32-bit unsigned integer\r
90 OUT: -1\r
91 @return Original integer\r
92\r
93**/\r
94UINT32\r
95LockdownSemaphore (\r
96 IN OUT volatile UINT32 *Sem\r
97 )\r
98{\r
99 UINT32 Value;\r
100\r
101 do {\r
102 Value = *Sem;\r
103 } while (InterlockedCompareExchange32 (\r
104 (UINT32*)Sem,\r
105 Value, (UINT32)-1\r
106 ) != Value);\r
107 return Value;\r
108}\r
109\r
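//
// Usage sketch (illustrative only, not part of the driver): Counter acts as an
// arrival count while each CPU's Run field acts as a per-CPU doorbell. A
// hypothetical check-in on an AP could look like:
//
//   ReleaseSemaphore (&mSmmMpSyncData->Counter);                  // announce arrival
//   WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);    // wait to be released
//
// LockdownSemaphore() is used by the BSP to freeze Counter at (UINT32)-1 so that
// CPUs arriving later can no longer check in for the current SMI run.
//
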
110/**\r
111 Wait for all APs to perform an atomic compare exchange operation to release the semaphore.\r
112\r
113 @param NumberOfAPs Number of APs to wait for\r
114\r
115**/\r
116VOID\r
117WaitForAllAPs (\r
118 IN UINTN NumberOfAPs\r
119 )\r
120{\r
121 UINTN BspIndex;\r
122\r
123 BspIndex = mSmmMpSyncData->BspIndex;\r
124 while (NumberOfAPs-- > 0) {\r
125 WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
126 }\r
127}\r
128\r
129/**\r
130 Performs an atomic compare exchange operation to release semaphore\r
131 for each AP.\r
132\r
133**/\r
134VOID\r
135ReleaseAllAPs (\r
136 VOID\r
137 )\r
138{\r
139 UINTN Index;\r
140 UINTN BspIndex;\r
141\r
142 BspIndex = mSmmMpSyncData->BspIndex;\r
143 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
144 if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) {\r
145 ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run);\r
146 }\r
147 }\r
148}\r
149\r
150/**\r
151 Checks if all CPUs (with certain exceptions) have checked in for this SMI run\r
152\r
153 @param Exceptions CPU Arrival exception flags.\r
154\r
155 @retval TRUE if all CPUs have checked in.\r
156 @retval FALSE if at least one Normal AP hasn't checked in.\r
157\r
158**/\r
159BOOLEAN\r
160AllCpusInSmmWithExceptions (\r
161 SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions\r
162 )\r
163{\r
164 UINTN Index;\r
165 SMM_CPU_DATA_BLOCK *CpuData;\r
166 EFI_PROCESSOR_INFORMATION *ProcessorInfo;\r
167\r
168 ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
169\r
170 if (mSmmMpSyncData->Counter == mNumberOfCpus) {\r
171 return TRUE;\r
172 }\r
173\r
174 CpuData = mSmmMpSyncData->CpuData;\r
175 ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;\r
176 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
177 if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
178 if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {\r
179 continue;\r
180 }\r
181 if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {\r
182 continue;\r
183 }\r
184 if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {\r
185 continue;\r
186 }\r
187 return FALSE;\r
188 }\r
189 }\r
190\r
191\r
192 return TRUE;\r
193}\r
194\r
195\r
196/**\r
197 Given timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before\r
198 entering SMM, except for SMI-disabled APs.\r
199\r
200**/\r
201VOID\r
202SmmWaitForApArrival (\r
203 VOID\r
204 )\r
205{\r
206 UINT64 Timer;\r
207 UINTN Index;\r
208\r
209 ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus);\r
210\r
211 //\r
212 // Platform implementor should choose a timeout value appropriately:\r
213 // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded from the SMM run. Note\r
214 // that SMI handlers must ALWAYS take into account the case that not all APs are available in an SMI run.\r
215 // - The timeout value must, in the case of the 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI\r
216 // and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will\r
217 // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the\r
218 // SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.\r
219 // - The timeout value must be longer than the longest possible IO operation in the system\r
220 //\r
221\r
222 //\r
223 // Sync with APs 1st timeout\r
224 //\r
225 for (Timer = StartSyncTimer ();\r
226 !IsSyncTimerTimeout (Timer) &&\r
227 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
228 ) {\r
229 CpuPause ();\r
230 }\r
231\r
232 //\r
233 // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,\r
234 // because:\r
235 // a) A delayed AP may have just come out of the delayed state. A blocked AP may have just been brought out of the blocked state by some AP running\r
236 // normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of the delayed / blocked state, they\r
237 // enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that there are no APs doing normal mode\r
238 // work while SMI handling is on-going.\r
239 // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.\r
240 // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state\r
241 // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal\r
242 // mode work while SMI handling is on-going.\r
243 // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:\r
244 // - In traditional flow, SMI disabling is discouraged.\r
245 // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.\r
246 // In both cases, adding SMI-disabling checking code increases overhead.\r
247 //\r
248 if (mSmmMpSyncData->Counter < mNumberOfCpus) {\r
249 //\r
250 // Send SMI IPIs to bring outside processors in\r
251 //\r
252 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
253 if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {\r
254 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);\r
255 }\r
256 }\r
257\r
258 //\r
259 // Sync with APs 2nd timeout.\r
260 //\r
261 for (Timer = StartSyncTimer ();\r
262 !IsSyncTimerTimeout (Timer) &&\r
263 !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );\r
264 ) {\r
265 CpuPause ();\r
266 }\r
267 }\r
268\r
269 return;\r
270}\r
271\r
272\r
273/**\r
274 Replace OS MTRRs with SMI MTRRs.\r
275\r
276 @param CpuIndex Processor Index\r
277\r
278**/\r
279VOID\r
280ReplaceOSMtrrs (\r
281 IN UINTN CpuIndex\r
282 )\r
283{\r
284 PROCESSOR_SMM_DESCRIPTOR *Psd;\r
285 UINT64 *SmiMtrrs;\r
286 MTRR_SETTINGS *BiosMtrr;\r
287\r
288 Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);\r
289 SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;\r
290\r
291 SmmCpuFeaturesDisableSmrr ();\r
292\r
293 //\r
294 // Replace all MTRR registers\r
295 //\r
296 BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;\r
297 MtrrSetAllMtrrs(BiosMtrr);\r
298}\r
299\r
300/**\r
301 SMI handler for BSP.\r
302\r
303 @param CpuIndex BSP processor Index\r
304 @param SyncMode SMM MP sync mode\r
305\r
306**/\r
307VOID\r
308BSPHandler (\r
309 IN UINTN CpuIndex,\r
310 IN SMM_CPU_SYNC_MODE SyncMode\r
311 )\r
312{\r
313 UINTN Index;\r
314 MTRR_SETTINGS Mtrrs;\r
315 UINTN ApCount;\r
316 BOOLEAN ClearTopLevelSmiResult;\r
317 UINTN PresentCount;\r
318\r
319 ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);\r
320 ApCount = 0;\r
321\r
322 //\r
323 // Flag BSP's presence\r
324 //\r
325 mSmmMpSyncData->InsideSmm = TRUE;\r
326\r
327 //\r
328 // Initialize Debug Agent to start source level debug in BSP handler\r
329 //\r
330 InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);\r
331\r
332 //\r
333 // Mark this processor's presence\r
334 //\r
335 mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;\r
336\r
337 //\r
338 // Clear platform top level SMI status bit before calling SMI handlers. If\r
339 // we cleared it after SMI handlers are run, we would miss the SMI that\r
340 // occurs after SMI handlers are done and before SMI status bit is cleared.\r
341 //\r
342 ClearTopLevelSmiResult = ClearTopLevelSmiStatus();\r
343 ASSERT (ClearTopLevelSmiResult == TRUE);\r
344\r
345 //\r
346 // Set running processor index\r
347 //\r
348 gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;\r
349\r
350 //\r
351 // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.\r
352 //\r
353 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
354\r
355 //\r
356 // Wait for APs to arrive\r
357 //\r
358 SmmWaitForApArrival();\r
359\r
360 //\r
361 // Lock the counter down and retrieve the number of APs\r
362 //\r
363 mSmmMpSyncData->AllCpusInSync = TRUE;\r
364 ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
365\r
366 //\r
367 // Wait for all APs to get ready for programming MTRRs\r
368 //\r
369 WaitForAllAPs (ApCount);\r
370\r
371 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
372 //\r
373 // Signal all APs it's time for backup MTRRs\r
374 //\r
375 ReleaseAllAPs ();\r
376\r
377 //\r
378 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at\r
379 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set\r
380 // to a large enough value to avoid this situation.\r
381 // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.\r
382 // We do the backup first and then set MTRR to avoid race condition for threads\r
383 // in the same core.\r
384 //\r
385 MtrrGetAllMtrrs(&Mtrrs);\r
386\r
387 //\r
388 // Wait for all APs to complete their MTRR saving\r
389 //\r
390 WaitForAllAPs (ApCount);\r
391\r
392 //\r
393 // Let all processors program SMM MTRRs together\r
394 //\r
395 ReleaseAllAPs ();\r
396\r
397 //\r
398 // WaitForSemaphore() may wait forever if an AP happens to enter SMM at\r
399 // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set\r
400 // to a large enough value to avoid this situation.\r
401 //\r
402 ReplaceOSMtrrs (CpuIndex);\r
403\r
404 //\r
405 // Wait for all APs to complete their MTRR programming\r
406 //\r
407 WaitForAllAPs (ApCount);\r
408 }\r
409 }\r
410\r
411 //\r
412 // The BUSY lock is initialized to Acquired state\r
413 //\r
414 AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
415\r
416 //\r
417 // Perform the pre tasks\r
418 //\r
419 PerformPreTasks ();\r
420\r
421 //\r
422 // Invoke SMM Foundation EntryPoint with the processor information context.\r
423 //\r
424 gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);\r
425\r
426 //\r
427 // Make sure all APs have completed their pending non-blocking tasks\r
428 //\r
429 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
430 if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) {\r
431 AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);\r
432 ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);\r
433 }\r
434 }\r
435\r
436 //\r
437 // Perform the remaining tasks\r
438 //\r
439 PerformRemainingTasks ();\r
440\r
441 //\r
442 // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and\r
443 // make those APs exit SMI synchronously. APs which arrive later will be excluded and\r
444 // will run through freely.\r
445 //\r
446 if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {\r
447\r
448 //\r
449 // Lock the counter down and retrieve the number of APs\r
450 //\r
451 mSmmMpSyncData->AllCpusInSync = TRUE;\r
452 ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1;\r
453 //\r
454 // Make sure all APs have their Present flag set\r
455 //\r
456 while (TRUE) {\r
457 PresentCount = 0;\r
458 for (Index = mMaxNumberOfCpus; Index-- > 0;) {\r
459 if (mSmmMpSyncData->CpuData[Index].Present) {\r
460 PresentCount ++;\r
461 }\r
462 }\r
463 if (PresentCount > ApCount) {\r
464 break;\r
465 }\r
466 }\r
467 }\r
468\r
469 //\r
470 // Notify all APs to exit\r
471 //\r
472 mSmmMpSyncData->InsideSmm = FALSE;\r
473 ReleaseAllAPs ();\r
474\r
475 //\r
476 // Wait for all APs to complete their pending tasks\r
477 //\r
478 WaitForAllAPs (ApCount);\r
479\r
480 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
481 //\r
482 // Signal APs to restore MTRRs\r
483 //\r
484 ReleaseAllAPs ();\r
485\r
486 //\r
487 // Restore OS MTRRs\r
488 //\r
489 SmmCpuFeaturesReenableSmrr ();\r
490 MtrrSetAllMtrrs(&Mtrrs);\r
491\r
492 //\r
493 // Wait for all APs to complete MTRR programming\r
494 //\r
495 WaitForAllAPs (ApCount);\r
496 }\r
497\r
498 //\r
499 // Stop source level debug in BSP handler, the code below will not be\r
500 // debugged.\r
501 //\r
502 InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);\r
503\r
504 //\r
505 // Signal APs to Reset states/semaphore for this processor\r
506 //\r
507 ReleaseAllAPs ();\r
508\r
509 //\r
510 // Perform pending operations for hot-plug\r
511 //\r
512 SmmCpuUpdate ();\r
513\r
514 //\r
515 // Clear the Present flag of BSP\r
516 //\r
517 mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;\r
518\r
519 //\r
520 // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but\r
521 // WaitForAllAPs() does not depend on the Present flag.\r
522 //\r
523 WaitForAllAPs (ApCount);\r
524\r
525 //\r
526 // Reset BspIndex to -1, meaning BSP has not been elected.\r
527 //\r
528 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
529 mSmmMpSyncData->BspIndex = (UINT32)-1;\r
530 }\r
531\r
532 //\r
533 // Allow APs to check in from this point on\r
534 //\r
535 mSmmMpSyncData->Counter = 0;\r
536 mSmmMpSyncData->AllCpusInSync = FALSE;\r
537}\r
538\r
539/**\r
540 SMI handler for AP.\r
541\r
542 @param CpuIndex AP processor Index.\r
543 @param ValidSmi Indicates whether the current SMI is a valid SMI.\r
544 @param SyncMode SMM MP sync mode.\r
545\r
546**/\r
547VOID\r
548APHandler (\r
549 IN UINTN CpuIndex,\r
550 IN BOOLEAN ValidSmi,\r
551 IN SMM_CPU_SYNC_MODE SyncMode\r
552 )\r
553{\r
554 UINT64 Timer;\r
555 UINTN BspIndex;\r
556 MTRR_SETTINGS Mtrrs;\r
557\r
558 //\r
559 // Timeout BSP\r
560 //\r
561 for (Timer = StartSyncTimer ();\r
562 !IsSyncTimerTimeout (Timer) &&\r
563 !mSmmMpSyncData->InsideSmm;\r
564 ) {\r
565 CpuPause ();\r
566 }\r
567\r
568 if (!mSmmMpSyncData->InsideSmm) {\r
569 //\r
570 // BSP timeout in the first round\r
571 //\r
572 if (mSmmMpSyncData->BspIndex != -1) {\r
573 //\r
574 // BSP Index is known\r
575 //\r
576 BspIndex = mSmmMpSyncData->BspIndex;\r
577 ASSERT (CpuIndex != BspIndex);\r
578\r
579 //\r
580 // Send SMI IPI to bring BSP in\r
581 //\r
582 SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);\r
583\r
584 //\r
585 // Now wait for the BSP with a 2nd timeout\r
586 //\r
587 for (Timer = StartSyncTimer ();\r
588 !IsSyncTimerTimeout (Timer) &&\r
589 !mSmmMpSyncData->InsideSmm;\r
590 ) {\r
591 CpuPause ();\r
592 }\r
593\r
594 if (!mSmmMpSyncData->InsideSmm) {\r
595 //\r
596 // Give up since BSP is unable to enter SMM\r
597 // and signal the completion of this AP\r
598 WaitForSemaphore (&mSmmMpSyncData->Counter);\r
599 return;\r
600 }\r
601 } else {\r
602 //\r
603 // Don't know BSP index. Give up without sending IPI to BSP.\r
604 //\r
605 WaitForSemaphore (&mSmmMpSyncData->Counter);\r
606 return;\r
607 }\r
608 }\r
609\r
610 //\r
611 // BSP is available\r
612 //\r
613 BspIndex = mSmmMpSyncData->BspIndex;\r
614 ASSERT (CpuIndex != BspIndex);\r
615\r
616 //\r
617 // Mark this processor's presence\r
618 //\r
619 mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE;\r
620\r
621 if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {\r
622 //\r
623 // Notify BSP of arrival at this point\r
624 //\r
625 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
626 }\r
627\r
628 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
629 //\r
630 // Wait for the signal from BSP to backup MTRRs\r
631 //\r
632 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
633\r
634 //\r
635 // Backup OS MTRRs\r
636 //\r
637 MtrrGetAllMtrrs(&Mtrrs);\r
638\r
639 //\r
640 // Signal BSP the completion of this AP\r
641 //\r
642 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
643\r
644 //\r
645 // Wait for BSP's signal to program MTRRs\r
646 //\r
647 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
648\r
649 //\r
650 // Replace OS MTRRs with SMI MTRRs\r
651 //\r
652 ReplaceOSMtrrs (CpuIndex);\r
653\r
654 //\r
655 // Signal BSP the completion of this AP\r
656 //\r
657 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
658 }\r
659\r
660 while (TRUE) {\r
661 //\r
662 // Wait for something to happen\r
663 //\r
664 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
665\r
666 //\r
667 // Check if BSP wants to exit SMM\r
668 //\r
669 if (!mSmmMpSyncData->InsideSmm) {\r
670 break;\r
671 }\r
672\r
673 //\r
674 // BUSY should be acquired by SmmStartupThisAp()\r
675 //\r
676 ASSERT (\r
677 !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)\r
678 );\r
679\r
680 //\r
681 // Invoke the scheduled procedure\r
682 //\r
683 (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (\r
684 (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter\r
685 );\r
686\r
687 //\r
688 // Release BUSY\r
689 //\r
690 ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
691 }\r
692\r
693 if (SmmCpuFeaturesNeedConfigureMtrrs()) {\r
694 //\r
695 // Notify BSP the readiness of this AP to program MTRRs\r
696 //\r
697 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
698\r
699 //\r
700 // Wait for the signal from BSP to program MTRRs\r
701 //\r
702 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
703\r
704 //\r
705 // Restore OS MTRRs\r
706 //\r
707 SmmCpuFeaturesReenableSmrr ();\r
708 MtrrSetAllMtrrs(&Mtrrs);\r
709 }\r
710\r
711 //\r
712 // Notify BSP the readiness of this AP to Reset states/semaphore for this processor\r
713 //\r
714 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
715\r
716 //\r
717 // Wait for the signal from BSP to Reset states/semaphore for this processor\r
718 //\r
719 WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
720\r
721 //\r
722 // Reset states/semaphore for this processor\r
723 //\r
724 mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE;\r
725\r
726 //\r
727 // Notify BSP the readiness of this AP to exit SMM\r
728 //\r
729 ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run);\r
730\r
731}\r
732\r
733/**\r
734 Create 4G PageTable in SMRAM.\r
735\r
736 @param ExtraPages Additional pages to allocate besides those needed for 4G memory\r
737 @param Is32BitPageTable Whether the page table is 32-bit PAE\r
738 @return PageTable Address\r
739\r
740**/\r
741UINT32\r
742Gen4GPageTable (\r
743 IN UINTN ExtraPages,\r
744 IN BOOLEAN Is32BitPageTable\r
745 )\r
746{\r
747 VOID *PageTable;\r
748 UINTN Index;\r
749 UINT64 *Pte;\r
750 UINTN PagesNeeded;\r
751 UINTN Low2MBoundary;\r
752 UINTN High2MBoundary;\r
753 UINTN Pages;\r
754 UINTN GuardPage;\r
755 UINT64 *Pdpte;\r
756 UINTN PageIndex;\r
757 UINTN PageAddress;\r
758\r
759 Low2MBoundary = 0;\r
760 High2MBoundary = 0;\r
761 PagesNeeded = 0;\r
762 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
763 //\r
764 // Add one more page for known good stack, then find the lower 2MB aligned address.\r
765 //\r
766 Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);\r
767 //\r
768 // Add two more pages for known good stack and stack guard page,\r
769 // then find the lower 2MB aligned address.\r
770 //\r
771 High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);\r
772 PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;\r
773 }\r
774 //\r
775 // Allocate the page table\r
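  // After the ExtraPages reserved at the start of the buffer, the 5 fixed pages
  // hold 1 page of PDPTEs followed by 4 pages of PDEs that map 0-4GB with 2MB
  // pages; the PagesNeeded pages become 4KB page tables covering the stack
  // range when PcdCpuSmmStackGuard is set.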
776 //\r
777 PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);\r
778 ASSERT (PageTable != NULL);\r
779\r
780 PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));\r
781 Pte = (UINT64*)PageTable;\r
782\r
783 //\r
784 // Zero out all page table entries first\r
785 //\r
786 ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));\r
787\r
788 //\r
789 // Set Page Directory Pointers\r
790 //\r
791 for (Index = 0; Index < 4; Index++) {\r
792 Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);\r
793 }\r
794 Pte += EFI_PAGE_SIZE / sizeof (*Pte);\r
795\r
796 //\r
797 // Fill in Page Directory Entries\r
798 //\r
799 for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {\r
800 Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;\r
801 }\r
802\r
803 if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
804 Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);\r
805 GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;\r
806 Pdpte = (UINT64*)PageTable;\r
807 for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {\r
808 Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));\r
809 Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;\r
810 //\r
811 // Fill in Page Table Entries\r
812 //\r
813 Pte = (UINT64*)Pages;\r
814 PageAddress = PageIndex;\r
815 for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {\r
816 if (PageAddress == GuardPage) {\r
817 //\r
818 // Mark the guard page as non-present\r
819 //\r
820 Pte[Index] = PageAddress;\r
821 GuardPage += mSmmStackSize;\r
822 if (GuardPage > mSmmStackArrayEnd) {\r
823 GuardPage = 0;\r
824 }\r
825 } else {\r
826 Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;\r
827 }\r
828 PageAddress += EFI_PAGE_SIZE;\r
829 }\r
830 Pages += EFI_PAGE_SIZE;\r
831 }\r
832 }\r
833\r
834 return (UINT32)(UINTN)PageTable;\r
835}\r
836\r
837/**\r
838 Set memory cacheability.\r
839\r
840 @param PageTable PageTable Address\r
841 @param Address Memory Address to change cacheability for\r
842 @param Cacheability Cacheability to set\r
843\r
844**/\r
845VOID\r
846SetCacheability (\r
847 IN UINT64 *PageTable,\r
848 IN UINTN Address,\r
849 IN UINT8 Cacheability\r
850 )\r
851{\r
852 UINTN PTIndex;\r
853 VOID *NewPageTableAddress;\r
854 UINT64 *NewPageTable;\r
855 UINTN Index;\r
856\r
857 ASSERT ((Address & EFI_PAGE_MASK) == 0);\r
858\r
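  //
  // Walk the page-table hierarchy: the shift counts 39/30/21/12 below extract
  // the PML4, PDPT, PD and PT indexes of Address (9 bits per level); the PML4
  // level only exists when built for 64-bit (4-level paging) mode.
  //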
859 if (sizeof (UINTN) == sizeof (UINT64)) {\r
860 PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;\r
861 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
862 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
863 }\r
864\r
865 PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;\r
866 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
867 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
868\r
869 //\r
870 // A perfect implementation should check the original cacheability with the\r
871 // one being set, and break a 2M page entry into pieces only when they\r
872 // disagreed.\r
873 //\r
874 PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;\r
875 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {\r
876 //\r
877 // Allocate a page from SMRAM\r
878 //\r
879 NewPageTableAddress = AllocatePageTableMemory (1);\r
880 ASSERT (NewPageTableAddress != NULL);\r
881\r
882 NewPageTable = (UINT64 *)NewPageTableAddress;\r
883\r
884 for (Index = 0; Index < 0x200; Index++) {\r
885 NewPageTable[Index] = PageTable[PTIndex];\r
886 if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {\r
887 NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);\r
888 NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;\r
889 }\r
890 NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);\r
891 }\r
892\r
893 PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;\r
894 }\r
895\r
896 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
897 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
898\r
899 PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;\r
900 ASSERT (PageTable[PTIndex] & IA32_PG_P);\r
901 PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));\r
902 PageTable[PTIndex] |= (UINT64)Cacheability;\r
903}\r
904\r
905\r
906/**\r
907 Schedule a procedure to run on the specified CPU.\r
908\r
909 @param Procedure The address of the procedure to run\r
910 @param CpuIndex Target CPU Index\r
911 @param ProcArguments The parameter to pass to the procedure\r
912\r
913 @retval EFI_INVALID_PARAMETER CpuIndex not valid\r
914 @retval EFI_INVALID_PARAMETER CpuIndex specifying BSP\r
915 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM\r
916 @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy\r
917 @retval EFI_SUCCESS The procedure has been successfully scheduled\r
918\r
919**/\r
920EFI_STATUS\r
921EFIAPI\r
922SmmStartupThisAp (\r
923 IN EFI_AP_PROCEDURE Procedure,\r
924 IN UINTN CpuIndex,\r
925 IN OUT VOID *ProcArguments OPTIONAL\r
926 )\r
927{\r
928 if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||\r
929 CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||\r
930 !mSmmMpSyncData->CpuData[CpuIndex].Present ||\r
931 gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||\r
932 !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) {\r
933 return EFI_INVALID_PARAMETER;\r
934 }\r
935\r
936 mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;\r
937 mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;\r
938 ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run);\r
939\r
940 if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {\r
941 AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
942 ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
943 }\r
944 return EFI_SUCCESS;\r
945}\r
946\r
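//
// Usage sketch (illustrative, not part of the driver): a hypothetical caller on
// the BSP could dispatch MyApWorker() to CPU 1 and then wait for it to finish by
// cycling the Busy lock, mirroring what BSPHandler() does for pending
// non-blocking tasks:
//
//   Status = SmmStartupThisAp (MyApWorker, 1, &MyContext);
//   if (!EFI_ERROR (Status)) {
//     AcquireSpinLock (&mSmmMpSyncData->CpuData[1].Busy);
//     ReleaseSpinLock (&mSmmMpSyncData->CpuData[1].Busy);
//   }
//
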
947/**\r
948 This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.\r
949 They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.\r
950\r
951 NOTE: It might not be appropriate at runtime since it might\r
952 conflict with OS debugging facilities. Turn them off in RELEASE builds.\r
953\r
954 @param CpuIndex CPU Index\r
955\r
956**/\r
957VOID\r
958EFIAPI\r
959CpuSmmDebugEntry (\r
960 IN UINTN CpuIndex\r
961 )\r
962{\r
963 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
964 \r
965 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
966 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
967 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
968 AsmWriteDr6 (CpuSaveState->x86._DR6);\r
969 AsmWriteDr7 (CpuSaveState->x86._DR7);\r
970 } else {\r
971 AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);\r
972 AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);\r
973 }\r
974 }\r
975}\r
976\r
977/**\r
978 This function restores DR6 & DR7 to the SMM save state.\r
979\r
980 NOTE: It might not be appropriate at runtime since it might\r
981 conflict with OS debugging facilities. Turn them off in RELEASE builds.\r
982\r
983 @param CpuIndex CPU Index\r
984\r
985**/\r
986VOID\r
987EFIAPI\r
988CpuSmmDebugExit (\r
989 IN UINTN CpuIndex\r
990 )\r
991{\r
992 SMRAM_SAVE_STATE_MAP *CpuSaveState;\r
993\r
994 if (FeaturePcdGet (PcdCpuSmmDebug)) {\r
995 CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];\r
996 if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {\r
997 CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();\r
998 CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();\r
999 } else {\r
1000 CpuSaveState->x64._DR7 = AsmReadDr7 ();\r
1001 CpuSaveState->x64._DR6 = AsmReadDr6 ();\r
1002 }\r
1003 }\r
1004}\r
1005\r
1006/**\r
1007 C function for SMI entry, each processor comes here upon SMI trigger.\r
1008\r
1009 @param CpuIndex CPU Index\r
1010\r
1011**/\r
1012VOID\r
1013EFIAPI\r
1014SmiRendezvous (\r
1015 IN UINTN CpuIndex\r
1016 )\r
1017{\r
1018 EFI_STATUS Status;\r
1019 BOOLEAN ValidSmi;\r
1020 BOOLEAN IsBsp;\r
1021 BOOLEAN BspInProgress;\r
1022 UINTN Index;\r
1023 UINTN Cr2;\r
1024 BOOLEAN XdDisableFlag;\r
1025 MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;\r
1026\r
1027 //\r
1028 // Save Cr2 because Page Fault exception in SMM may override its value\r
1029 //\r
1030 Cr2 = AsmReadCr2 ();\r
1031\r
1032 //\r
1033 // Perform CPU specific entry hooks\r
1034 //\r
1035 SmmCpuFeaturesRendezvousEntry (CpuIndex);\r
1036\r
1037 //\r
1038 // Determine if this is a valid SMI\r
1039 //\r
1040 ValidSmi = PlatformValidSmi();\r
1041\r
1042 //\r
1043 // Determine if the BSP is already in progress. Note this must be checked after\r
1044 // ValidSmi because BSP may clear a valid SMI source after checking in.\r
1045 //\r
1046 BspInProgress = mSmmMpSyncData->InsideSmm;\r
1047\r
1048 if (!BspInProgress && !ValidSmi) {\r
1049 //\r
1050 // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not\r
1051 // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI\r
1052 // status had been cleared by BSP and an existing SMI run has almost ended. (Note\r
1053 // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there\r
1054 // is nothing we need to do.\r
1055 //\r
1056 goto Exit;\r
1057 } else {\r
1058 //\r
1059 // Signal presence of this processor\r
1060 //\r
1061 if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) {\r
1062 //\r
1063 // BSP has already ended the synchronization, so QUIT!!!\r
1064 //\r
1065\r
1066 //\r
1067 // Wait for BSP's signal to finish SMI\r
1068 //\r
1069 while (mSmmMpSyncData->AllCpusInSync) {\r
1070 CpuPause ();\r
1071 }\r
1072 goto Exit;\r
1073 } else {\r
1074\r
1075 //\r
1076 // The BUSY lock is initialized to Released state.\r
1077 // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.\r
1078 // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately\r
1079 // after AP's present flag is detected.\r
1080 //\r
1081 InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy);\r
1082 }\r
1083\r
1084 //\r
1085 // Try to enable XD\r
1086 //\r
1087 XdDisableFlag = FALSE;\r
1088 if (mXdSupported) {\r
1089 MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);\r
1090 if (MiscEnableMsr.Bits.XD == 1) {\r
1091 XdDisableFlag = TRUE;\r
1092 MiscEnableMsr.Bits.XD = 0;\r
1093 AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);\r
1094 }\r
1095 ActivateXd ();\r
1096 }\r
1097\r
1098 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1099 ActivateSmmProfile (CpuIndex);\r
1100 }\r
1101\r
1102 if (BspInProgress) {\r
1103 //\r
1104 // BSP has been elected. Follow AP path, regardless of ValidSmi flag\r
1105 // as BSP may have cleared the SMI status\r
1106 //\r
1107 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1108 } else {\r
1109 //\r
1110 // We have a valid SMI\r
1111 //\r
1112\r
1113 //\r
1114 // Elect BSP\r
1115 //\r
1116 IsBsp = FALSE;\r
1117 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1118 if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {\r
1119 //\r
1120 // Call platform hook to do BSP election\r
1121 //\r
1122 Status = PlatformSmmBspElection (&IsBsp);\r
1123 if (EFI_SUCCESS == Status) {\r
1124 //\r
1125 // Platform hook determines successfully\r
1126 //\r
1127 if (IsBsp) {\r
1128 mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;\r
1129 }\r
1130 } else {\r
1131 //\r
1132 // Platform hook failed to determine; use the default BSP election method\r
1133 //\r
1134 InterlockedCompareExchange32 (\r
1135 (UINT32*)&mSmmMpSyncData->BspIndex,\r
1136 (UINT32)-1,\r
1137 (UINT32)CpuIndex\r
1138 );\r
1139 }\r
1140 }\r
1141 }\r
1142\r
1143 //\r
1144 // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP\r
1145 //\r
1146 if (mSmmMpSyncData->BspIndex == CpuIndex) {\r
1147\r
1148 //\r
1149 // Clear last request for SwitchBsp.\r
1150 //\r
1151 if (mSmmMpSyncData->SwitchBsp) {\r
1152 mSmmMpSyncData->SwitchBsp = FALSE;\r
1153 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1154 mSmmMpSyncData->CandidateBsp[Index] = FALSE;\r
1155 }\r
1156 }\r
1157\r
1158 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
1159 SmmProfileRecordSmiNum ();\r
1160 }\r
1161\r
1162 //\r
1163 // BSP Handler is always called with a ValidSmi == TRUE\r
1164 //\r
1165 BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);\r
1166 } else {\r
1167 APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);\r
1168 }\r
1169 }\r
1170\r
1171 ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0);\r
1172\r
1173 //\r
1174 // Wait for BSP's signal to exit SMI\r
1175 //\r
1176 while (mSmmMpSyncData->AllCpusInSync) {\r
1177 CpuPause ();\r
1178 }\r
1179\r
1180 //\r
1181 // Restore XD\r
1182 //\r
1183 if (XdDisableFlag) {\r
1184 MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);\r
1185 MiscEnableMsr.Bits.XD = 1;\r
1186 AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);\r
1187 }\r
1188 }\r
1189\r
1190Exit:\r
1191 SmmCpuFeaturesRendezvousExit (CpuIndex);\r
1192 //\r
1193 // Restore Cr2\r
1194 //\r
1195 AsmWriteCr2 (Cr2);\r
1196}\r
1197\r
1198/**\r
1199 Allocate buffer for all semaphores and spin locks.\r
1200\r
1201**/\r
1202VOID\r
1203InitializeSmmCpuSemaphores (\r
1204 VOID\r
1205 )\r
1206{\r
1207 UINTN ProcessorCount;\r
1208 UINTN TotalSize;\r
1209 UINTN GlobalSemaphoresSize;\r
1210 UINTN SemaphoreSize;\r
1211 UINTN Pages;\r
1212 UINTN *SemaphoreBlock;\r
1213 UINTN SemaphoreAddr;\r
1214\r
1215 SemaphoreSize = GetSpinLockProperties ();\r
1216 ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
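  //
  // Each semaphore is given its own SemaphoreSize-aligned slot; GetSpinLockProperties()
  // reports the recommended spin lock size (typically one cache line), so CPUs
  // spinning on different semaphores do not share a cache line.
  //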
1217 GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;\r
1218 TotalSize = GlobalSemaphoresSize;\r
1219 DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));\r
1220 DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));\r
1221 Pages = EFI_SIZE_TO_PAGES (TotalSize);\r
1222 SemaphoreBlock = AllocatePages (Pages);\r
1223 ASSERT (SemaphoreBlock != NULL);\r
1224 ZeroMem (SemaphoreBlock, TotalSize);\r
1225\r
1226 SemaphoreAddr = (UINTN)SemaphoreBlock;\r
1227 mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;\r
1228 SemaphoreAddr += SemaphoreSize;\r
1229 mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;\r
1230 SemaphoreAddr += SemaphoreSize;\r
1231 mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;\r
1232 SemaphoreAddr += SemaphoreSize;\r
1233 mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;\r
1234 SemaphoreAddr += SemaphoreSize;\r
1235 mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock\r
1236 = (SPIN_LOCK *)SemaphoreAddr;\r
1237\r
1238 mSemaphoreSize = SemaphoreSize;\r
1239}\r
1240\r
1241/**\r
1242 Initialize un-cacheable data.\r
1243\r
1244**/\r
1245VOID\r
1246EFIAPI\r
1247InitializeMpSyncData (\r
1248 VOID\r
1249 )\r
1250{\r
1251 if (mSmmMpSyncData != NULL) {\r
1252 ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);\r
1253 mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));\r
1254 mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);\r
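    //
    // The buffer allocated in InitializeMpServiceData() is laid out as one
    // SMM_DISPATCHER_MP_SYNC_DATA header, followed by NumberOfCpus
    // SMM_CPU_DATA_BLOCK entries, followed by NumberOfCpus CandidateBsp flags.
    //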
1255 if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {\r
1256 //\r
1257 // Enable BSP election by setting BspIndex to -1\r
1258 //\r
1259 mSmmMpSyncData->BspIndex = (UINT32)-1;\r
1260 }\r
1261 mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);\r
1262\r
1263 InitializeSmmCpuSemaphores ();\r
1264 }\r
1265}\r
1266\r
1267/**\r
1268 Initialize global data for MP synchronization.\r
1269\r
1270 @param Stacks Base address of SMI stack buffer for all processors.\r
1271 @param StackSize Stack size for each processor in SMM.\r
1272\r
1273**/\r
1274UINT32\r
1275InitializeMpServiceData (\r
1276 IN VOID *Stacks,\r
1277 IN UINTN StackSize\r
1278 )\r
1279{\r
1280 UINT32 Cr3;\r
1281 UINTN Index;\r
1282 MTRR_SETTINGS *Mtrr;\r
1283 PROCESSOR_SMM_DESCRIPTOR *Psd;\r
1284 UINT8 *GdtTssTables;\r
1285 UINTN GdtTableStepSize;\r
1286\r
1287 //\r
1288 // Initialize physical address mask\r
1289 // NOTE: Physical memory above virtual address limit is not supported !!!\r
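  // CPUID leaf 0x80000008 reports the physical address width in bits [7:0] of EAX;
  // the mask derived from it is clamped to 48 bits with the low 12 (page offset)
  // bits cleared.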
1290 //\r
1291 AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);\r
1292 gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;\r
1293 gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;\r
1294\r
1295 //\r
1296 // Create page tables\r
1297 //\r
1298 Cr3 = SmmInitPageTable ();\r
1299\r
1300 GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
1301\r
1302 //\r
1303 // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU\r
1304 //\r
1305 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
1306 Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);\r
1307 CopyMem (Psd, &gcPsd, sizeof (gcPsd));\r
1308 Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
1309 Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
1310\r
1311 //\r
1312 // Install SMI handler\r
1313 //\r
1314 InstallSmiHandler (\r
1315 Index,\r
1316 (UINT32)mCpuHotPlugData.SmBase[Index],\r
1317 (VOID*)((UINTN)Stacks + (StackSize * Index)),\r
1318 StackSize,\r
1319 (UINTN)Psd->SmmGdtPtr,\r
1320 Psd->SmmGdtSize,\r
1321 gcSmiIdtr.Base,\r
1322 gcSmiIdtr.Limit + 1,\r
1323 Cr3\r
1324 );\r
1325 }\r
1326\r
1327 //\r
1328 // Initialize mSmmMpSyncData\r
1329 //\r
1330 mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +\r
1331 (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;\r
1332 mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));\r
1333 ASSERT (mSmmMpSyncData != NULL);\r
1334 InitializeMpSyncData ();\r
1335\r
1336 //\r
1337 // Record current MTRR settings\r
1338 //\r
1339 ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));\r
1340 Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;\r
1341 MtrrGetAllMtrrs (Mtrr);\r
1342\r
1343 return Cr3;\r
1344}\r
1345\r
1346/**\r
1347\r
1348 Register the SMM Foundation entry point.\r
1349\r
1350 @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance\r
1351 @param SmmEntryPoint SMM Foundation EntryPoint\r
1352\r
1353 @retval EFI_SUCCESS Successfully registered the SMM Foundation entry point\r
1354\r
1355**/\r
1356EFI_STATUS\r
1357EFIAPI\r
1358RegisterSmmEntry (\r
1359 IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,\r
1360 IN EFI_SMM_ENTRY_POINT SmmEntryPoint\r
1361 )\r
1362{\r
1363 //\r
1364 // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.\r
1365 //\r
1366 gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;\r
1367 return EFI_SUCCESS;\r
1368}\r