/** @file
SMM MP service implementation

Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

//
// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
//
UINT64                                      gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1];
UINT64                                      gPhyMask;
SMM_DISPATCHER_MP_SYNC_DATA                 *mSmmMpSyncData = NULL;
UINTN                                       mSmmMpSyncDataSize;
SMM_CPU_SEMAPHORES                          mSmmCpuSemaphores;
UINTN                                       mSemaphoreSize;
SPIN_LOCK                                   *mPFLock = NULL;

/**
  Performs an atomic compare exchange operation to get semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer - 1
  @return     Original integer - 1

**/
UINT32
WaitForSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

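  //
  // Spin while the semaphore is zero; otherwise try to decrement it
  // atomically. The compare exchange fails if another processor claims
  // the semaphore first, in which case the value is re-sampled.
  //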
  do {
    Value = *Sem;
  } while (Value == 0 ||
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value - 1
             ) != Value);
  return Value - 1;
}


/**
  Performs an atomic compare exchange operation to release semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: original integer + 1
  @return     Original integer + 1

**/
UINT32
ReleaseSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

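  //
  // Stop without exchanging if the increment would wrap the semaphore to
  // zero (*Sem == MAX_UINT32, the locked-down value), so a locked
  // semaphore stays locked and the wrapped value 0 is returned.
  //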
  do {
    Value = *Sem;
  } while (Value + 1 != 0 &&
           InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value,
             Value + 1
             ) != Value);
  return Value + 1;
}

/**
  Performs an atomic compare exchange operation to lock semaphore.
  The compare exchange operation must be performed using
  MP safe mechanisms.

  @param      Sem        IN:  32-bit unsigned integer
                         OUT: -1
  @return     Original integer

**/
UINT32
LockdownSemaphore (
  IN OUT  volatile UINT32           *Sem
  )
{
  UINT32  Value;

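  //
  // Setting the semaphore to (UINT32)-1 makes a subsequent
  // ReleaseSemaphore() wrap to 0, which signals late-coming processors
  // that the BSP has already locked this counter down.
  //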
  do {
    Value = *Sem;
  } while (InterlockedCompareExchange32 (
             (UINT32*)Sem,
             Value, (UINT32)-1
             ) != Value);
  return Value;
}

/**
  Waits for all APs to perform an atomic compare exchange operation that
  releases the semaphore.

  @param NumberOfAPs      Number of APs to wait for.

**/
VOID
WaitForAllAPs (
  IN      UINTN                     NumberOfAPs
  )
{
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  while (NumberOfAPs-- > 0) {
    WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }
}

/**
  Performs an atomic compare exchange operation to release semaphore
  for each AP.

**/
VOID
ReleaseAllAPs (
  VOID
  )
{
  UINTN  Index;
  UINTN  BspIndex;

  BspIndex = mSmmMpSyncData->BspIndex;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != BspIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
    }
  }
}

/**
  Checks if all CPUs (with certain exceptions) have checked in for this SMI run

  @param   Exceptions     CPU Arrival exception flags.

  @retval   TRUE  if all CPUs have checked in.
  @retval   FALSE if at least one Normal AP hasn't checked in.

**/
BOOLEAN
AllCpusInSmmWithExceptions (
  SMM_CPU_ARRIVAL_EXCEPTIONS  Exceptions
  )
{
  UINTN                      Index;
  SMM_CPU_DATA_BLOCK         *CpuData;
  EFI_PROCESSOR_INFORMATION  *ProcessorInfo;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
    return TRUE;
  }

  CpuData = mSmmMpSyncData->CpuData;
  ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
      if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
        continue;
      }
      if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
        continue;
      }
      return FALSE;
    }
  }

  return TRUE;
}


/**
  Given the timeout constraint, wait for all APs to arrive, and ensure that when
  this function returns, no AP will execute normal mode code before entering SMM,
  except for SMI-disabled APs.

**/
VOID
SmmWaitForApArrival (
  VOID
  )
{
  UINT64  Timer;
  UINTN   Index;

  ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);

  //
  // Platform implementor should choose a timeout value appropriately:
  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
  // - The timeout value must be longer than the longest possible IO operation in the system
  //

  //
  // Sync with APs 1st timeout
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
       ) {
    CpuPause ();
  }

  //
  // Not all APs have arrived, so we need a 2nd round of timeout. IPIs should be sent to ALL non-present APs,
  // because:
  // a) Delayed APs may have just come out of the delayed state. Blocked APs may have just been brought out of blocked state by some AP running
  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
  //    enter SMI immediately without executing instructions in normal mode. Note the traditional flow requires that no APs do normal mode
  //    work while SMI handling is on-going.
  // b) As a consequence of SMI IPI sending, (spurious) SMIs may occur after this SMM run.
  // c) ** NOTE **: Use the SMI disabling feature VERY CAREFULLY (if at all) for the traditional flow, because a processor in SMI-disabled state
  //    will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
  //    mode work while SMI handling is on-going.
  // d) We don't add code to check SMI disabling status to skip sending IPI to SMI-disabled APs, because:
  //    - In traditional flow, SMI disabling is discouraged.
  //    - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
  //    In both cases, adding SMI-disabling checking code increases overhead.
  //
  if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
    //
    // Send SMI IPIs to bring outside processors in
    //
    for (Index = mMaxNumberOfCpus; Index-- > 0;) {
      if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
        SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
      }
    }

    //
    // Sync with APs 2nd timeout.
    //
    for (Timer = StartSyncTimer ();
         !IsSyncTimerTimeout (Timer) &&
         !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
         ) {
      CpuPause ();
    }
  }

  return;
}


/**
  Replace OS MTRRs with SMI MTRRs.

  @param    CpuIndex             Processor Index

**/
VOID
ReplaceOSMtrrs (
  IN      UINTN                     CpuIndex
  )
{
  PROCESSOR_SMM_DESCRIPTOR  *Psd;
  UINT64                    *SmiMtrrs;
  MTRR_SETTINGS             *BiosMtrr;

  Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET);
  SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr;

  SmmCpuFeaturesDisableSmrr ();

  //
  // Replace all MTRR registers
  //
  BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs;
  MtrrSetAllMtrrs(BiosMtrr);
}

/**
  SMI handler for BSP.

  @param     CpuIndex         BSP processor Index
  @param     SyncMode         SMM MP sync mode

**/
VOID
BSPHandler (
  IN      UINTN                     CpuIndex,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINTN          Index;
  MTRR_SETTINGS  Mtrrs;
  UINTN          ApCount;
  BOOLEAN        ClearTopLevelSmiResult;
  UINTN          PresentCount;

  ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
  ApCount = 0;

  //
  // Flag BSP's presence
  //
  *mSmmMpSyncData->InsideSmm = TRUE;

  //
  // Initialize Debug Agent to start source level debug in BSP handler
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  //
  // Clear platform top level SMI status bit before calling SMI handlers. If
  // we cleared it after SMI handlers are run, we would miss the SMI that
  // occurs after SMI handlers are done and before SMI status bit is cleared.
  //
  ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
  ASSERT (ClearTopLevelSmiResult == TRUE);

  //
  // Set running processor index
  //
  gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;

  //
  // If Traditional Sync Mode or need to configure MTRRs: gather all available APs.
  //
  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Wait for APs to arrive
    //
    SmmWaitForApArrival();

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;

    //
    // Wait for all APs to get ready for programming MTRRs
    //
    WaitForAllAPs (ApCount);

    if (SmmCpuFeaturesNeedConfigureMtrrs()) {
      //
      // Signal all APs it's time for backup MTRRs
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
      // We do the backup first and then set MTRR to avoid race condition for threads
      // in the same core.
      //
      MtrrGetAllMtrrs(&Mtrrs);

      //
      // Wait for all APs to complete their MTRR saving
      //
      WaitForAllAPs (ApCount);

      //
      // Let all processors program SMM MTRRs together
      //
      ReleaseAllAPs ();

      //
      // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
      // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
      // to a large enough value to avoid this situation.
      //
      ReplaceOSMtrrs (CpuIndex);

      //
      // Wait for all APs to complete their MTRR programming
      //
      WaitForAllAPs (ApCount);
    }
  }

  //
  // The BUSY lock is initialized to Acquired state
  //
  AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy);

  //
  // Perform the pre tasks
  //
  PerformPreTasks ();

  //
  // Invoke SMM Foundation EntryPoint with the processor information context.
  //
  gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);

  //
  // Make sure all APs have completed their pending non-blocking tasks
  //
  for (Index = mMaxNumberOfCpus; Index-- > 0;) {
    if (Index != CpuIndex && *(mSmmMpSyncData->CpuData[Index].Present)) {
      AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
      ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
    }
  }

  //
  // Perform the remaining tasks
  //
  PerformRemainingTasks ();

  //
  // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
  // make those APs exit SMI synchronously. APs which arrive later will be excluded and
  // will run through freely.
  //
  if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {

    //
    // Lock the counter down and retrieve the number of APs
    //
    *mSmmMpSyncData->AllCpusInSync = TRUE;
    ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
    //
    // Make sure all APs have their Present flag set
    //
    while (TRUE) {
      PresentCount = 0;
      for (Index = mMaxNumberOfCpus; Index-- > 0;) {
        if (*(mSmmMpSyncData->CpuData[Index].Present)) {
          PresentCount++;
        }
      }
      if (PresentCount > ApCount) {
        break;
      }
    }
  }

  //
  // Notify all APs to exit
  //
  *mSmmMpSyncData->InsideSmm = FALSE;
  ReleaseAllAPs ();

  //
  // Wait for all APs to complete their pending tasks
  //
  WaitForAllAPs (ApCount);

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Signal APs to restore MTRRs
    //
    ReleaseAllAPs ();

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);

    //
    // Wait for all APs to complete MTRR programming
    //
    WaitForAllAPs (ApCount);
  }

  //
  // Stop source level debug in BSP handler, the code below will not be
  // debugged.
  //
  InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);

  //
  // Signal APs to Reset states/semaphore for this processor
  //
  ReleaseAllAPs ();

  //
  // Perform pending operations for hot-plug
  //
  SmmCpuUpdate ();

  //
  // Clear the Present flag of BSP
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
  // WaitForAllAPs does not depend on the Present flag.
  //
  WaitForAllAPs (ApCount);

  //
  // Reset BspIndex to -1, meaning BSP has not been elected.
  //
  if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
    mSmmMpSyncData->BspIndex = (UINT32)-1;
  }

  //
  // Allow APs to check in from this point on
  //
  *mSmmMpSyncData->Counter = 0;
  *mSmmMpSyncData->AllCpusInSync = FALSE;
}

/**
  SMI handler for AP.

  @param     CpuIndex         AP processor Index.
  @param     ValidSmi         Indicates whether the current SMI is a valid SMI.
  @param     SyncMode         SMM MP sync mode.

**/
VOID
APHandler (
  IN      UINTN                     CpuIndex,
  IN      BOOLEAN                   ValidSmi,
  IN      SMM_CPU_SYNC_MODE         SyncMode
  )
{
  UINT64         Timer;
  UINTN          BspIndex;
  MTRR_SETTINGS  Mtrrs;

  //
  // Timeout BSP
  //
  for (Timer = StartSyncTimer ();
       !IsSyncTimerTimeout (Timer) &&
       !(*mSmmMpSyncData->InsideSmm);
       ) {
    CpuPause ();
  }

  if (!(*mSmmMpSyncData->InsideSmm)) {
    //
    // BSP timeout in the first round
    //
    if (mSmmMpSyncData->BspIndex != -1) {
      //
      // BSP Index is known
      //
      BspIndex = mSmmMpSyncData->BspIndex;
      ASSERT (CpuIndex != BspIndex);

      //
      // Send SMI IPI to bring BSP in
      //
      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);

      //
      // Now clock BSP for the 2nd time
      //
      for (Timer = StartSyncTimer ();
           !IsSyncTimerTimeout (Timer) &&
           !(*mSmmMpSyncData->InsideSmm);
           ) {
        CpuPause ();
      }

      if (!(*mSmmMpSyncData->InsideSmm)) {
        //
        // Give up since BSP is unable to enter SMM
        // and signal the completion of this AP
        //
        WaitForSemaphore (mSmmMpSyncData->Counter);
        return;
      }
    } else {
      //
      // Don't know BSP index. Give up without sending IPI to BSP.
      //
      WaitForSemaphore (mSmmMpSyncData->Counter);
      return;
    }
  }

  //
  // BSP is available
  //
  BspIndex = mSmmMpSyncData->BspIndex;
  ASSERT (CpuIndex != BspIndex);

  //
  // Mark this processor's presence
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;

  if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP of arrival at this point
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Wait for the signal from BSP to backup MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Backup OS MTRRs
    //
    MtrrGetAllMtrrs(&Mtrrs);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for BSP's signal to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Replace OS MTRRs with SMI MTRRs
    //
    ReplaceOSMtrrs (CpuIndex);

    //
    // Signal BSP the completion of this AP
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
  }

  while (TRUE) {
    //
    // Wait for something to happen
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Check if BSP wants to exit SMM
    //
    if (!(*mSmmMpSyncData->InsideSmm)) {
      break;
    }

    //
    // BUSY should be acquired by SmmStartupThisAp()
    //
    ASSERT (
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
      );

    //
    // Invoke the scheduled procedure
    //
    (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
      (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
      );

    //
    // Release BUSY
    //
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }

  if (SmmCpuFeaturesNeedConfigureMtrrs()) {
    //
    // Notify BSP the readiness of this AP to program MTRRs
    //
    ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

    //
    // Wait for the signal from BSP to program MTRRs
    //
    WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

    //
    // Restore OS MTRRs
    //
    SmmCpuFeaturesReenableSmrr ();
    MtrrSetAllMtrrs(&Mtrrs);
  }

  //
  // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

  //
  // Wait for the signal from BSP to Reset states/semaphore for this processor
  //
  WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  //
  // Reset states/semaphore for this processor
  //
  *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;

  //
  // Notify BSP the readiness of this AP to exit SMM
  //
  ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);

}

/**
  Create 4G PageTable in SMRAM.

  @param          ExtraPages       Additional pages to allocate, beyond those needed to map 4G of memory
  @param          Is32BitPageTable Whether the page table is 32-bit PAE
  @return         PageTable Address

**/
UINT32
Gen4GPageTable (
  IN      UINTN                     ExtraPages,
  IN      BOOLEAN                   Is32BitPageTable
  )
{
  VOID    *PageTable;
  UINTN   Index;
  UINT64  *Pte;
  UINTN   PagesNeeded;
  UINTN   Low2MBoundary;
  UINTN   High2MBoundary;
  UINTN   Pages;
  UINTN   GuardPage;
  UINT64  *Pdpte;
  UINTN   PageIndex;
  UINTN   PageAddress;

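  //
  // Layout of the allocation (after the ExtraPages): one PDPT page followed
  // by four PD pages; each PDE maps a 2MB page, so 4 * 512 entries cover the
  // full 4GB. When the stack guard is enabled, additional page-table pages
  // are allocated so the 2MB regions covering the SMM stacks can be split
  // into 4KB pages, allowing guard pages to be marked not-present.
  //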
  Low2MBoundary = 0;
  High2MBoundary = 0;
  PagesNeeded = 0;
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // Add one more page for known good stack, then find the lower 2MB aligned address.
    //
    Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
    //
    // Add two more pages for known good stack and stack guard page,
    // then find the lower 2MB aligned address.
    //
    High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
    PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
  }
  //
  // Allocate the page table
  //
  PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);
  ASSERT (PageTable != NULL);

  PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
  Pte = (UINT64*)PageTable;

  //
  // Zero out all page table entries first
  //
  ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));

  //
  // Set Page Directory Pointers
  //
  for (Index = 0; Index < 4; Index++) {
    Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
  }
  Pte += EFI_PAGE_SIZE / sizeof (*Pte);

  //
  // Fill in Page Directory Entries
  //
  for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
    Pte[Index] = (Index << 21) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
  }

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
    GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
    Pdpte = (UINT64*)PageTable;
    for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
      Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1));
      Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | PAGE_ATTRIBUTE_BITS;
      //
      // Fill in Page Table Entries
      //
      Pte = (UINT64*)Pages;
      PageAddress = PageIndex;
      for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
        if (PageAddress == GuardPage) {
          //
          // Mark the guard page as non-present
          //
          Pte[Index] = PageAddress;
          GuardPage += mSmmStackSize;
          if (GuardPage > mSmmStackArrayEnd) {
            GuardPage = 0;
          }
        } else {
          Pte[Index] = PageAddress | PAGE_ATTRIBUTE_BITS;
        }
        PageAddress += EFI_PAGE_SIZE;
      }
      Pages += EFI_PAGE_SIZE;
    }
  }

  return (UINT32)(UINTN)PageTable;
}

/**
  Set memory cacheability.

  @param    PageTable              PageTable Address
  @param    Address                Memory Address to change cacheability
  @param    Cacheability           Cacheability to set

**/
VOID
SetCacheability (
  IN      UINT64                    *PageTable,
  IN      UINTN                     Address,
  IN      UINT8                     Cacheability
  )
{
  UINTN   PTIndex;
  VOID    *NewPageTableAddress;
  UINT64  *NewPageTable;
  UINTN   Index;

  ASSERT ((Address & EFI_PAGE_MASK) == 0);

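  //
  // On X64 the walk starts one level higher, at the PML4; on IA32 (PAE
  // paging) the supplied PageTable already points at the page directory
  // pointer table, so the first lookup below is skipped.
  //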
  if (sizeof (UINTN) == sizeof (UINT64)) {
    PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff;
    ASSERT (PageTable[PTIndex] & IA32_PG_P);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
  }

  PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  //
  // A perfect implementation should check the original cacheability with the
  // one being set, and break a 2M page entry into pieces only when they
  // disagree.
  //
  PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff;
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Allocate a page from SMRAM
    //
    NewPageTableAddress = AllocatePageTableMemory (1);
    ASSERT (NewPageTableAddress != NULL);

    NewPageTable = (UINT64 *)NewPageTableAddress;

    for (Index = 0; Index < 0x200; Index++) {
      NewPageTable[Index] = PageTable[PTIndex];
      if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) {
        NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M);
        NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K;
      }
      NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT);
    }

    PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | PAGE_ATTRIBUTE_BITS;
  }

  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);

  PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff;
  ASSERT (PageTable[PTIndex] & IA32_PG_P);
  PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT)));
  PageTable[PTIndex] |= (UINT64)Cacheability;
}


/**
  Schedule a procedure to run on the specified CPU.

  @param  Procedure                The address of the procedure to run
  @param  CpuIndex                 Target CPU Index
  @param  ProcArguments            The parameter to pass to the procedure

  @retval EFI_INVALID_PARAMETER    CpuIndex not valid
  @retval EFI_INVALID_PARAMETER    CpuIndex specifying BSP
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex did not enter SMM
  @retval EFI_INVALID_PARAMETER    The AP specified by CpuIndex is busy
  @retval EFI_SUCCESS              The procedure has been successfully scheduled

**/
EFI_STATUS
EFIAPI
SmmStartupThisAp (
  IN      EFI_AP_PROCEDURE          Procedure,
  IN      UINTN                     CpuIndex,
  IN OUT  VOID                      *ProcArguments OPTIONAL
  )
{
  if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
      CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
      !(*(mSmmMpSyncData->CpuData[CpuIndex].Present)) ||
      gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
      !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
    return EFI_INVALID_PARAMETER;
  }

  mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
  mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
  ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);

  if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
    AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
  }
  return EFI_SUCCESS;
}
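
//
// Illustrative usage sketch (not part of this driver): an SMI handler
// running on the BSP could schedule a hypothetical procedure on AP 1:
//
//   Status = SmmStartupThisAp (MyApProcedure, 1, &MyApContext);
//
// MyApProcedure and MyApContext are placeholders. The target AP executes
// the procedure from its wait loop in APHandler() and releases its BUSY
// lock when done; if PcdCpuSmmBlockStartupThisAp is TRUE, the
// Acquire/Release pair above blocks this function until that happens,
// otherwise the call returns as soon as the procedure is scheduled.
//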

/**
  This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
  They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode.

  NOTE: It might not be appreciated at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugEntry (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      AsmWriteDr6 (CpuSaveState->x86._DR6);
      AsmWriteDr7 (CpuSaveState->x86._DR7);
    } else {
      AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
      AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
    }
  }
}

/**
  This function restores DR6 & DR7 to SMM save state.

  NOTE: It might not be appreciated at runtime since it might
        conflict with OS debugging facilities. Turn them off in RELEASE.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
CpuSmmDebugExit (
  IN UINTN  CpuIndex
  )
{
  SMRAM_SAVE_STATE_MAP  *CpuSaveState;

  if (FeaturePcdGet (PcdCpuSmmDebug)) {
    CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
    if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
      CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
      CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
    } else {
      CpuSaveState->x64._DR7 = AsmReadDr7 ();
      CpuSaveState->x64._DR6 = AsmReadDr6 ();
    }
  }
}


/**
  C function for SMI entry, each processor comes here upon SMI trigger.

  @param    CpuIndex              CPU Index

**/
VOID
EFIAPI
SmiRendezvous (
  IN      UINTN                     CpuIndex
  )
{
  EFI_STATUS                     Status;
  BOOLEAN                        ValidSmi;
  BOOLEAN                        IsBsp;
  BOOLEAN                        BspInProgress;
  UINTN                          Index;
  UINTN                          Cr2;
  BOOLEAN                        XdDisableFlag;
  MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;

  //
  // Save Cr2 because Page Fault exception in SMM may override its value
  //
  Cr2 = AsmReadCr2 ();

  //
  // Perform CPU specific entry hooks
  //
  SmmCpuFeaturesRendezvousEntry (CpuIndex);

  //
  // Determine if this is a valid SMI
  //
  ValidSmi = PlatformValidSmi();

  //
  // Determine if BSP has been already in progress. Note this must be checked after
  // ValidSmi because BSP may clear a valid SMI source after checking in.
  //
  BspInProgress = *mSmmMpSyncData->InsideSmm;

  if (!BspInProgress && !ValidSmi) {
    //
    // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
    // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
    // status had been cleared by BSP and an existing SMI run has almost ended. (Note
    // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
    // is nothing we need to do.
    //
    goto Exit;
  } else {
    //
    // Signal presence of this processor
    //
    if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
      //
      // BSP has already ended the synchronization, so QUIT!!!
      //

      //
      // Wait for BSP's signal to finish SMI
      //
      while (*mSmmMpSyncData->AllCpusInSync) {
        CpuPause ();
      }
      goto Exit;
    } else {

      //
      // The BUSY lock is initialized to Released state.
      // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
      // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
      // after AP's present flag is detected.
      //
      InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
    }

    //
    // Try to enable XD
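    // (MSR_IA32_MISC_ENABLE bit 34 is the "XD Bit Disable" flag: clear it
    // if it is set so the XD feature can be activated, and remember to
    // restore it on SMI exit.)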
    //
    XdDisableFlag = FALSE;
    if (mXdSupported) {
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      if (MiscEnableMsr.Bits.XD == 1) {
        XdDisableFlag = TRUE;
        MiscEnableMsr.Bits.XD = 0;
        AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
      }
      ActivateXd ();
    }

    if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
      ActivateSmmProfile (CpuIndex);
    }

    if (BspInProgress) {
      //
      // BSP has been elected. Follow AP path, regardless of ValidSmi flag
      // as BSP may have cleared the SMI status
      //
      APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
    } else {
      //
      // We have a valid SMI
      //

      //
      // Elect BSP
      //
      IsBsp = FALSE;
      if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
        if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
          //
          // Call platform hook to do BSP election
          //
          Status = PlatformSmmBspElection (&IsBsp);
          if (EFI_SUCCESS == Status) {
            //
            // Platform hook determines successfully
            //
            if (IsBsp) {
              mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
            }
          } else {
            //
            // Platform hook fails to determine, use default BSP election method
            //
            InterlockedCompareExchange32 (
              (UINT32*)&mSmmMpSyncData->BspIndex,
              (UINT32)-1,
              (UINT32)CpuIndex
              );
          }
        }
      }

      //
      // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
      //
      if (mSmmMpSyncData->BspIndex == CpuIndex) {

        //
        // Clear last request for SwitchBsp.
        //
        if (mSmmMpSyncData->SwitchBsp) {
          mSmmMpSyncData->SwitchBsp = FALSE;
          for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
            mSmmMpSyncData->CandidateBsp[Index] = FALSE;
          }
        }

        if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
          SmmProfileRecordSmiNum ();
        }

        //
        // BSP Handler is always called with a ValidSmi == TRUE
        //
        BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
      } else {
        APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
      }
    }

    ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);

    //
    // Wait for BSP's signal to exit SMI
    //
    while (*mSmmMpSyncData->AllCpusInSync) {
      CpuPause ();
    }

    //
    // Restore XD
    //
    if (XdDisableFlag) {
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      MiscEnableMsr.Bits.XD = 1;
      AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
    }
  }

Exit:
  SmmCpuFeaturesRendezvousExit (CpuIndex);
  //
  // Restore Cr2
  //
  AsmWriteCr2 (Cr2);
}

/**
  Allocate buffer for all semaphores and spin locks.

**/
VOID
InitializeSmmCpuSemaphores (
  VOID
  )
{
  UINTN  ProcessorCount;
  UINTN  TotalSize;
  UINTN  GlobalSemaphoresSize;
  UINTN  CpuSemaphoresSize;
  UINTN  MsrSemaphoreSize;
  UINTN  SemaphoreSize;
  UINTN  Pages;
  UINTN  *SemaphoreBlock;
  UINTN  SemaphoreAddr;

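  //
  // Each semaphore gets its own slot of SemaphoreSize bytes, as reported
  // by GetSpinLockProperties() (typically one cache line), so that no two
  // semaphores share a cache line.
  //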
  SemaphoreSize = GetSpinLockProperties ();
  ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
  CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
  MsrSemaphoreSize = MSR_SPIN_LOCK_INIT_NUM * SemaphoreSize;
  TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize + MsrSemaphoreSize;
  DEBUG((EFI_D_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
  DEBUG((EFI_D_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
  Pages = EFI_SIZE_TO_PAGES (TotalSize);
  SemaphoreBlock = AllocatePages (Pages);
  ASSERT (SemaphoreBlock != NULL);
  ZeroMem (SemaphoreBlock, TotalSize);

  SemaphoreAddr = (UINTN)SemaphoreBlock;
  mSmmCpuSemaphores.SemaphoreGlobal.Counter       = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm     = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.PFLock        = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
                                                  = (SPIN_LOCK *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreCpu.Busy    = (SPIN_LOCK *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Run     = (UINT32 *)SemaphoreAddr;
  SemaphoreAddr += ProcessorCount * SemaphoreSize;
  mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;

  SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize + CpuSemaphoresSize;
  mSmmCpuSemaphores.SemaphoreMsr.Msr = (SPIN_LOCK *)SemaphoreAddr;
  mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter =
    ((UINTN)SemaphoreBlock + Pages * SIZE_4KB - SemaphoreAddr) / SemaphoreSize;
  ASSERT (mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter >= MSR_SPIN_LOCK_INIT_NUM);

  mPFLock                       = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
  mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
  mMemoryMappedLock             = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;

  mSemaphoreSize = SemaphoreSize;
}

/**
  Initialize uncacheable data.

**/
VOID
EFIAPI
InitializeMpSyncData (
  VOID
  )
{
  UINTN  CpuIndex;

  if (mSmmMpSyncData != NULL) {
    mSmmMpSyncData->SwitchBsp = FALSE;
    mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
    mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
    if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
      //
      // Enable BSP election by setting BspIndex to -1
      //
      mSmmMpSyncData->BspIndex = (UINT32)-1;
    }
    mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode);

    mSmmMpSyncData->Counter       = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
    mSmmMpSyncData->InsideSmm     = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
    mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
    ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
            mSmmMpSyncData->AllCpusInSync != NULL);
    *mSmmMpSyncData->Counter       = 0;
    *mSmmMpSyncData->InsideSmm     = FALSE;
    *mSmmMpSyncData->AllCpusInSync = FALSE;

    for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex++) {
      mSmmMpSyncData->CpuData[CpuIndex].Busy    =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Run     =
        (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
      mSmmMpSyncData->CpuData[CpuIndex].Present =
        (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
    }
  }
}

/**
  Initialize global data for MP synchronization.

  @param Stacks       Base address of SMI stack buffer for all processors.
  @param StackSize    Stack size for each processor in SMM.

  @return CR3 value (address of the top-level page table).

**/
UINT32
InitializeMpServiceData (
  IN VOID        *Stacks,
  IN UINTN       StackSize
  )
{
  UINT32                    Cr3;
  UINTN                     Index;
  MTRR_SETTINGS             *Mtrr;
  PROCESSOR_SMM_DESCRIPTOR  *Psd;
  UINT8                     *GdtTssTables;
  UINTN                     GdtTableStepSize;

  //
  // Allocate memory for all locks and semaphores
  //
  InitializeSmmCpuSemaphores ();

  //
  // Initialize mSmmMpSyncData
  //
  mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
                       (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
  ASSERT (mSmmMpSyncData != NULL);
  InitializeMpSyncData ();

  //
  // Initialize physical address mask
  // NOTE: Physical memory above virtual address limit is not supported !!!
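  // CPUID leaf 0x80000008 returns the physical address width in EAX[7:0]
  // (Index is reused as scratch storage for EAX); the resulting mask is
  // then clipped to 48 address bits and page-aligned.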
  //
  AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL);
  gPhyMask = LShiftU64 (1, (UINT8)Index) - 1;
  gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE;

  //
  // Create page tables
  //
  Cr3 = SmmInitPageTable ();

  GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);

  //
  // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU
  //
  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);
    CopyMem (Psd, &gcPsd, sizeof (gcPsd));
    Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);
    Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;

    //
    // Install SMI handler
    //
    InstallSmiHandler (
      Index,
      (UINT32)mCpuHotPlugData.SmBase[Index],
      (VOID*)((UINTN)Stacks + (StackSize * Index)),
      StackSize,
      (UINTN)Psd->SmmGdtPtr,
      Psd->SmmGdtSize,
      gcSmiIdtr.Base,
      gcSmiIdtr.Limit + 1,
      Cr3
      );
  }

  //
  // Record current MTRR settings
  //
  ZeroMem(gSmiMtrrs, sizeof (gSmiMtrrs));
  Mtrr = (MTRR_SETTINGS*)gSmiMtrrs;
  MtrrGetAllMtrrs (Mtrr);

  return Cr3;
}

/**

  Register the SMM Foundation entry point.

  @param          This              Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
  @param          SmmEntryPoint     SMM Foundation EntryPoint

  @retval         EFI_SUCCESS       Successfully registered the SMM Foundation entry point

**/
EFI_STATUS
EFIAPI
RegisterSmmEntry (
  IN CONST EFI_SMM_CONFIGURATION_PROTOCOL  *This,
  IN EFI_SMM_ENTRY_POINT                   SmmEntryPoint
  )
{
  //
  // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
  //
  gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
  return EFI_SUCCESS;
}