/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;

typedef struct {
  UINT8 *RendezvousFunnelAddress;
  UINTN PModeEntryOffset;
  UINTN FlatJumpOffset;
  UINTN Size;
  UINTN LModeEntryOffset;
  UINTN LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );

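//
// Two 4 KB pages carved out just below the legacy VGA range at 0xA0000,
// i.e. under 1 MB, presumably for real-mode AP startup use: a SIPI start
// vector must be 4 KB aligned and APs begin execution in real mode.
//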
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA                mAcpiCpuData;
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
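
//
// Illustrative pairing of the two helpers above (a sketch only, not part
// of the driver; MSR index 0x1A0 is hypothetical). A caller programming a
// package/core-scope MSR from several processors in parallel serializes
// on the per-MSR lock:
//
//   SPIN_LOCK  *MsrSpinLock;
//
//   InitMsrSpinLockByIndex (0x1A0);                // once, before parallel use
//   MsrSpinLock = GetMsrSpinLockByIndex (0x1A0);   // on each processor
//   AcquireSpinLock (MsrSpinLock);
//   AsmMsrBitFieldWrite64 (0x1A0, 0, 0, 1);        // read-modify-write bit 0
//   ReleaseSpinLock (MsrSpinLock);
//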

/**
  Sync up the MTRR values for all processors.

  @param  MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        ASSERT (MsrSpinLock != NULL);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
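
//
// A sketch of the kind of entry that drives the switch above (field values
// are illustrative only, not taken from any real platform table):
//
//   CPU_REGISTER_TABLE_ENTRY  Entry;
//
//   Entry.RegisterType   = Msr;
//   Entry.Index          = 0x199;   // hypothetical MSR index
//   Entry.ValidBitStart  = 0;
//   Entry.ValidBitLength = 16;      // < 64, so the spin-lock protected
//   Entry.Value          = 0x0800;  //   bit-field path is taken
//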

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Atomically count down the number of APs left to finish.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Atomically count down the number of APs left to finish.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}
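
//
// Resulting layout at StartupVector (a sketch; the offsets come from
// AsmGetAddressMap () and depend on the platform assembly):
//
//   StartupVector                        AP rendezvous code, AddressMap.Size
//                                        bytes, mode-switch jumps patched
//   StartupVector + FlatJumpOffset + 3   32-bit flat-jump target (PModeEntry)
//   StartupVector + LongJumpOffset + 2   32-bit long-jump target (LModeEntry;
//                                        patched only when LongJumpOffset != 0)
//   StartupVector + AddressMap.Size      MP_CPU_EXCHANGE_INFO shared between
//                                        BSP and APs
//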

/**
  The function is invoked before SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked before SMBASE relocation in the S3 path. It performs the
  first-time microcode load and restores MTRRs for both BSP and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 path to restore CPU status.

  The function is invoked after SMBASE relocation in the S3 path. It restores configuration
  according to data saved by the normal boot path for both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when APs were woken up in EarlyInitializeCpu.
  // Re-initialize StackAddress to the original beginning address.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}
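
//
// BSP/AP handshake used by EarlyInitializeCpu () and InitializeCpu () above
// (a sketch):
//
//   BSP                                       APs
//   mNumberToFinish = NumberOfCpus - 1
//   mExchangeInfo->ApFunction = <procedure>
//   SendInitSipiSipiAllExcludingSelf ()  -->  start at StartupVector,
//   while (mNumberToFinish > 0)               switch mode, run <procedure>,
//     CpuPause ()                             InterlockedDecrement (&mNumberToFinish)
//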