]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
UefiCpuPkg/PiSmmCpuDxeSmm: Allocate buffer for MSRs semaphores
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / CpuS3.c
1 /** @file
2 Code for Processor S3 restoration
3
4 Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
//
// Data exchanged between the BSP and the APs during S3 AP wake-up.
// NOTE(review): this layout appears to be consumed directly by the AP
// startup assembly code -- do not reorder or resize fields without
// updating the corresponding assembly; confirm against MpFuncs.nasm.
//
typedef struct {
  UINTN             Lock;          // Spin lock serializing AP access to this structure
  VOID              *StackStart;   // Base of the stack region handed out to APs
  UINTN             StackSize;     // Stack size allotted to each AP
  VOID              *ApFunction;   // C routine each AP invokes after mode switch
  IA32_DESCRIPTOR   GdtrProfile;   // GDTR image loaded by APs
  IA32_DESCRIPTOR   IdtrProfile;   // IDTR image loaded by APs
  UINT32            BufferStart;   // Physical address of the AP startup vector buffer
  UINT32            Cr3;           // Page-table base APs load when entering paged mode
} MP_CPU_EXCHANGE_INFO;
27
//
// Address/offset map describing the AP rendezvous code produced by the
// assembly helper AsmGetAddressMap(). Offsets are relative to
// RendezvousFunnelAddress.
//
typedef struct {
  UINT8             *RendezvousFunnelAddress;  // Start of the AP real-mode entry code
  UINTN             PModeEntryOffset;          // Offset of the 32-bit protected-mode entry point
  UINTN             FlatJumpOffset;            // Offset of the far jump into protected mode (patched at runtime)
  UINTN             Size;                      // Total size, in bytes, of the rendezvous code to copy
  UINTN             LModeEntryOffset;          // Offset of the 64-bit long-mode entry point
  UINTN             LongJumpOffset;            // Offset of the far jump into long mode (0 when not applicable)
} MP_ASSEMBLY_ADDRESS_MAP;
36
/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param AddressMap  Output buffer for address map information.

  @return NOTE(review): return value semantics are defined by the assembly
          implementation and are not used by callers in this file; confirm
          against the corresponding .nasm source before relying on it.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP                     *AddressMap
  );
48
//
// Region just below the 640KB legacy VGA hole used for the AP startup vector.
//
#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA                mAcpiCpuData;                   // CPU configuration saved by the normal boot path for S3 restore
// Count of APs that still have to finish their rendezvous routine; the BSP
// busy-waits on it while APs decrement it via InterlockedDecrement().
// NOTE(review): not declared volatile -- relies on the compiler re-reading
// the global in the poll loops below; confirm this matches build settings.
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;                 // BSP/AP exchange area placed right after the startup code
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;              // AP GDT image kept in SMRAM, copied out during S3
VOID                         *mIdtForAp = NULL;              // AP IDT image kept in SMRAM, copied out during S3
VOID                         *mMachineCheckHandlerForAp = NULL;  // AP machine-check handler image kept in SMRAM
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;          // Growable table of per-MSR spin locks
UINTN                        mMsrSpinLockCount = MSR_SPIN_LOCK_INIT_NUM;  // Current capacity of mMsrSpinLocks
UINTN                        mMsrCount = 0;                  // Number of registered MSR locks
62
63 /**
64 Get MSR spin lock by MSR index.
65
66 @param MsrIndex MSR index value.
67
68 @return Pointer to MSR spin lock.
69
70 **/
71 SPIN_LOCK *
72 GetMsrSpinLockByIndex (
73 IN UINT32 MsrIndex
74 )
75 {
76 UINTN Index;
77 for (Index = 0; Index < mMsrCount; Index++) {
78 if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
79 return &mMsrSpinLocks[Index].SpinLock;
80 }
81 }
82 return NULL;
83 }
84
85 /**
86 Initialize MSR spin lock by MSR index.
87
88 @param MsrIndex MSR index value.
89
90 **/
91 VOID
92 InitMsrSpinLockByIndex (
93 IN UINT32 MsrIndex
94 )
95 {
96 UINTN NewMsrSpinLockCount;
97
98 if (mMsrSpinLocks == NULL) {
99 mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * mMsrSpinLockCount);
100 ASSERT (mMsrSpinLocks != NULL);
101 }
102 if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
103 //
104 // Initialize spin lock for MSR programming
105 //
106 mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
107 InitializeSpinLock (&mMsrSpinLocks[mMsrCount].SpinLock);
108 mMsrCount ++;
109 if (mMsrCount == mMsrSpinLockCount) {
110 //
111 // If MSR spin lock buffer is full, enlarge it
112 //
113 NewMsrSpinLockCount = mMsrSpinLockCount + MSR_SPIN_LOCK_INIT_NUM;
114 mMsrSpinLocks = ReallocatePool (
115 sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
116 sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
117 mMsrSpinLocks
118 );
119 mMsrSpinLockCount = NewMsrSpinLockCount;
120 }
121 }
122 }
123
124 /**
125 Sync up the MTRR values for all processors.
126
127 @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
128 **/
129 VOID
130 EFIAPI
131 LoadMtrrData (
132 EFI_PHYSICAL_ADDRESS MtrrTable
133 )
134 /*++
135
136 Routine Description:
137
138 Sync up the MTRR values for all processors.
139
140 Arguments:
141
142 Returns:
143 None
144
145 --*/
146 {
147 MTRR_SETTINGS *MtrrSettings;
148
149 MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
150 MtrrSetAllMtrrs (MtrrSettings);
151 }
152
153 /**
154 Programs registers for the calling processor.
155
156 This function programs registers for the calling processor.
157
158 @param RegisterTable Pointer to register table of the running processor.
159
160 **/
161 VOID
162 SetProcessorRegister (
163 IN CPU_REGISTER_TABLE *RegisterTable
164 )
165 {
166 CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
167 UINTN Index;
168 UINTN Value;
169 SPIN_LOCK *MsrSpinLock;
170
171 //
172 // Traverse Register Table of this logical processor
173 //
174 RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
175 for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
176 //
177 // Check the type of specified register
178 //
179 switch (RegisterTableEntry->RegisterType) {
180 //
181 // The specified register is Control Register
182 //
183 case ControlRegister:
184 switch (RegisterTableEntry->Index) {
185 case 0:
186 Value = AsmReadCr0 ();
187 Value = (UINTN) BitFieldWrite64 (
188 Value,
189 RegisterTableEntry->ValidBitStart,
190 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
191 (UINTN) RegisterTableEntry->Value
192 );
193 AsmWriteCr0 (Value);
194 break;
195 case 2:
196 Value = AsmReadCr2 ();
197 Value = (UINTN) BitFieldWrite64 (
198 Value,
199 RegisterTableEntry->ValidBitStart,
200 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
201 (UINTN) RegisterTableEntry->Value
202 );
203 AsmWriteCr2 (Value);
204 break;
205 case 3:
206 Value = AsmReadCr3 ();
207 Value = (UINTN) BitFieldWrite64 (
208 Value,
209 RegisterTableEntry->ValidBitStart,
210 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
211 (UINTN) RegisterTableEntry->Value
212 );
213 AsmWriteCr3 (Value);
214 break;
215 case 4:
216 Value = AsmReadCr4 ();
217 Value = (UINTN) BitFieldWrite64 (
218 Value,
219 RegisterTableEntry->ValidBitStart,
220 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
221 (UINTN) RegisterTableEntry->Value
222 );
223 AsmWriteCr4 (Value);
224 break;
225 default:
226 break;
227 }
228 break;
229 //
230 // The specified register is Model Specific Register
231 //
232 case Msr:
233 //
234 // If this function is called to restore register setting after INIT signal,
235 // there is no need to restore MSRs in register table.
236 //
237 if (RegisterTableEntry->ValidBitLength >= 64) {
238 //
239 // If length is not less than 64 bits, then directly write without reading
240 //
241 AsmWriteMsr64 (
242 RegisterTableEntry->Index,
243 RegisterTableEntry->Value
244 );
245 } else {
246 //
247 // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
248 // to make sure MSR read/write operation is atomic.
249 //
250 MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
251 AcquireSpinLock (MsrSpinLock);
252 //
253 // Set the bit section according to bit start and length
254 //
255 AsmMsrBitFieldWrite64 (
256 RegisterTableEntry->Index,
257 RegisterTableEntry->ValidBitStart,
258 RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
259 RegisterTableEntry->Value
260 );
261 ReleaseSpinLock (MsrSpinLock);
262 }
263 break;
264 //
265 // Enable or disable cache
266 //
267 case CacheControl:
268 //
269 // If value of the entry is 0, then disable cache. Otherwise, enable cache.
270 //
271 if (RegisterTableEntry->Value == 0) {
272 AsmDisableCache ();
273 } else {
274 AsmEnableCache ();
275 }
276 break;
277
278 default:
279 break;
280 }
281 }
282 }
283
284 /**
285 AP initialization before SMBASE relocation in the S3 boot path.
286 **/
287 VOID
288 EarlyMPRendezvousProcedure (
289 VOID
290 )
291 {
292 CPU_REGISTER_TABLE *RegisterTableList;
293 UINT32 InitApicId;
294 UINTN Index;
295
296 LoadMtrrData (mAcpiCpuData.MtrrTable);
297
298 //
299 // Find processor number for this CPU.
300 //
301 RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
302 InitApicId = GetInitialApicId ();
303 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
304 if (RegisterTableList[Index].InitialApicId == InitApicId) {
305 SetProcessorRegister (&RegisterTableList[Index]);
306 break;
307 }
308 }
309
310 //
311 // Count down the number with lock mechanism.
312 //
313 InterlockedDecrement (&mNumberToFinish);
314 }
315
316 /**
317 AP initialization after SMBASE relocation in the S3 boot path.
318 **/
319 VOID
320 MPRendezvousProcedure (
321 VOID
322 )
323 {
324 CPU_REGISTER_TABLE *RegisterTableList;
325 UINT32 InitApicId;
326 UINTN Index;
327
328 ProgramVirtualWireMode ();
329 DisableLvtInterrupts ();
330
331 RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
332 InitApicId = GetInitialApicId ();
333 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
334 if (RegisterTableList[Index].InitialApicId == InitApicId) {
335 SetProcessorRegister (&RegisterTableList[Index]);
336 break;
337 }
338 }
339
340 //
341 // Count down the number with lock mechanism.
342 //
343 InterlockedDecrement (&mNumberToFinish);
344 }
345
346 /**
347 Prepares startup vector for APs.
348
349 This function prepares startup vector for APs.
350
351 @param WorkingBuffer The address of the work buffer.
352 **/
/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs: copies the AP rendezvous
  code to the low-memory working buffer, patches its mode-switch jump
  targets, and fills in the BSP/AP exchange area placed right after the code.

  @param WorkingBuffer The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.
  //
  // NOTE(review): the +3 / +2 offsets look like they skip the far-jump
  // opcode bytes to reach the 32-bit target operand inside the copied code;
  // confirm against the assembly source before changing them.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    // A zero LongJumpOffset means there is no long-mode jump to patch (IA32 build).
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  // The exchange area lives immediately after the copied startup code.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  // (descriptor Limit is inclusive, hence the +1 on the copy sizes).
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  // Truncation to UINT32 is intentional: the startup vector must reside below 4GB
  // (and, for the real-mode entry, below 1MB).
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}
401
402 /**
403 The function is invoked before SMBASE relocation in S3 path to restores CPU status.
404
405 The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
406 and restores MTRRs for both BSP and APs.
407
408 **/
409 VOID
410 EarlyInitializeCpu (
411 VOID
412 )
413 {
414 CPU_REGISTER_TABLE *RegisterTableList;
415 UINT32 InitApicId;
416 UINTN Index;
417
418 LoadMtrrData (mAcpiCpuData.MtrrTable);
419
420 //
421 // Find processor number for this CPU.
422 //
423 RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
424 InitApicId = GetInitialApicId ();
425 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
426 if (RegisterTableList[Index].InitialApicId == InitApicId) {
427 SetProcessorRegister (&RegisterTableList[Index]);
428 break;
429 }
430 }
431
432 ProgramVirtualWireMode ();
433
434 PrepareApStartupVector (mAcpiCpuData.StartupVector);
435
436 mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
437 mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;
438
439 //
440 // Send INIT IPI - SIPI to all APs
441 //
442 SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
443
444 while (mNumberToFinish > 0) {
445 CpuPause ();
446 }
447 }
448
449 /**
450 The function is invoked after SMBASE relocation in S3 path to restores CPU status.
451
452 The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
453 data saved by normal boot path for both BSP and APs.
454
455 **/
456 VOID
457 InitializeCpu (
458 VOID
459 )
460 {
461 CPU_REGISTER_TABLE *RegisterTableList;
462 UINT32 InitApicId;
463 UINTN Index;
464
465 RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
466 InitApicId = GetInitialApicId ();
467 for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
468 if (RegisterTableList[Index].InitialApicId == InitApicId) {
469 SetProcessorRegister (&RegisterTableList[Index]);
470 break;
471 }
472 }
473
474 mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
475 //
476 // StackStart was updated when APs were waken up in EarlyInitializeCpu.
477 // Re-initialize StackAddress to original beginning address.
478 //
479 mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
480 mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;
481
482 //
483 // Send INIT IPI - SIPI to all APs
484 //
485 SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
486
487 while (mNumberToFinish > 0) {
488 CpuPause ();
489 }
490 }