/** @file
Code for Processor S3 restoration

Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

typedef struct {
  UINTN             Lock;
  VOID              *StackStart;
  UINTN             StackSize;
  VOID              *ApFunction;
  IA32_DESCRIPTOR   GdtrProfile;
  IA32_DESCRIPTOR   IdtrProfile;
  UINT32            BufferStart;
  UINT32            Cr3;
} MP_CPU_EXCHANGE_INFO;
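
//
// Note: the field order and sizes of MP_CPU_EXCHANGE_INFO are consumed
// directly by the AP startup code located via AsmGetAddressMap() below, so
// they must stay in sync with the offsets that assembly code uses; changing
// this struct requires a matching change in the module's assembly file.
//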

typedef struct {
  UINT8 *RendezvousFunnelAddress;
  UINTN PModeEntryOffset;
  UINTN FlatJumpOffset;
  UINTN Size;
  UINTN LModeEntryOffset;
  UINTN LongJumpOffset;
} MP_ASSEMBLY_ADDRESS_MAP;

//
// Spin lock used to serialize MemoryMapped operation
//
SPIN_LOCK                *mMemoryMappedLock = NULL;

/**
  Get starting address and size of the rendezvous entry for APs.
  Information for fixing a jump instruction in the code is also returned.

  @param  AddressMap  Output buffer for address map information.
**/
VOID *
EFIAPI
AsmGetAddressMap (
  MP_ASSEMBLY_ADDRESS_MAP  *AddressMap
  );

#define LEGACY_REGION_SIZE    (2 * 0x1000)
#define LEGACY_REGION_BASE    (0xA0000 - LEGACY_REGION_SIZE)

ACPI_CPU_DATA                mAcpiCpuData;
UINT32                       mNumberToFinish;
MP_CPU_EXCHANGE_INFO         *mExchangeInfo;
BOOLEAN                      mRestoreSmmConfigurationInS3 = FALSE;
VOID                         *mGdtForAp = NULL;
VOID                         *mIdtForAp = NULL;
VOID                         *mMachineCheckHandlerForAp = NULL;
MP_MSR_LOCK                  *mMsrSpinLocks = NULL;
UINTN                        mMsrSpinLockCount;
UINTN                        mMsrCount = 0;

/**
  Get MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

  @return Pointer to MSR spin lock.

**/
SPIN_LOCK *
GetMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN     Index;
  for (Index = 0; Index < mMsrCount; Index++) {
    if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
      return mMsrSpinLocks[Index].SpinLock;
    }
  }
  return NULL;
}

/**
  Initialize MSR spin lock by MSR index.

  @param  MsrIndex       MSR index value.

**/
VOID
InitMsrSpinLockByIndex (
  IN UINT32      MsrIndex
  )
{
  UINTN    MsrSpinLockCount;
  UINTN    NewMsrSpinLockCount;
  UINTN    Index;
  UINTN    AddedSize;

  if (mMsrSpinLocks == NULL) {
    MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
    mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
    ASSERT (mMsrSpinLocks != NULL);
    for (Index = 0; Index < MsrSpinLockCount; Index++) {
      mMsrSpinLocks[Index].SpinLock =
        (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
      mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
    }
    mMsrSpinLockCount = MsrSpinLockCount;
    mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
  }
  if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
    //
    // Initialize spin lock for MSR programming
    //
    mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
    InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
    mMsrCount++;
    if (mMsrCount == mMsrSpinLockCount) {
      //
      // If MSR spin lock buffer is full, enlarge it
      //
      AddedSize = SIZE_4KB;
      mSmmCpuSemaphores.SemaphoreMsr.Msr =
                        AllocatePages (EFI_SIZE_TO_PAGES (AddedSize));
      ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
      NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
      mMsrSpinLocks = ReallocatePool (
                        sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
                        sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
                        mMsrSpinLocks
                        );
      ASSERT (mMsrSpinLocks != NULL);
      mMsrSpinLockCount = NewMsrSpinLockCount;
      for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
        mMsrSpinLocks[Index].SpinLock =
          (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
          (Index - mMsrCount) * mSemaphoreSize);
        mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
      }
    }
  }
}
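
//
// Usage sketch (illustrative only; the call below is hypothetical and the
// MSR index is a placeholder, not a constant defined in this file): before
// the APs program MSRs in parallel via SetProcessorRegister(), the BSP is
// expected to register a lock for each package/core-scoped MSR that appears
// in a register table with a partial bit field:
//
//   InitMsrSpinLockByIndex (HYPOTHETICAL_SHARED_SCOPE_MSR_INDEX);
//
// GetMsrSpinLockByIndex() then returns that same lock to every processor
// that programs the MSR, making the read-modify-write atomic.
//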

/**
  Sync up the MTRR values for all processors.

  @param  MtrrTable  Table holding fixed/variable MTRR values to be loaded.
**/
VOID
EFIAPI
LoadMtrrData (
  EFI_PHYSICAL_ADDRESS       MtrrTable
  )
{
  MTRR_SETTINGS   *MtrrSettings;

  MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
  MtrrSetAllMtrrs (MtrrSettings);
}
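
//
// For reference: the MTRR_SETTINGS consumed here is assumed to have been
// captured during the normal boot path with MtrrLib, along the lines of:
//
//   MTRR_SETTINGS  MtrrSettings;
//   MtrrGetAllMtrrs (&MtrrSettings);   // snapshot fixed/variable MTRRs
//   CopyMem ((VOID *) (UINTN) AcpiCpuData->MtrrTable, &MtrrSettings, sizeof (MtrrSettings));
//
// MtrrSetAllMtrrs() then restores the fixed MTRRs, variable MTRRs, and the
// default memory type in one call on each processor.
//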

/**
  Programs registers for the calling processor.

  This function programs registers for the calling processor.

  @param  RegisterTable  Pointer to register table of the running processor.

**/
VOID
SetProcessorRegister (
  IN CPU_REGISTER_TABLE        *RegisterTable
  )
{
  CPU_REGISTER_TABLE_ENTRY  *RegisterTableEntry;
  UINTN                     Index;
  UINTN                     Value;
  SPIN_LOCK                 *MsrSpinLock;

  //
  // Traverse Register Table of this logical processor
  //
  RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
  for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
    //
    // Check the type of specified register
    //
    switch (RegisterTableEntry->RegisterType) {
    //
    // The specified register is Control Register
    //
    case ControlRegister:
      switch (RegisterTableEntry->Index) {
      case 0:
        Value = AsmReadCr0 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr0 (Value);
        break;
      case 2:
        Value = AsmReadCr2 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr2 (Value);
        break;
      case 3:
        Value = AsmReadCr3 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr3 (Value);
        break;
      case 4:
        Value = AsmReadCr4 ();
        Value = (UINTN) BitFieldWrite64 (
                          Value,
                          RegisterTableEntry->ValidBitStart,
                          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
                          (UINTN) RegisterTableEntry->Value
                          );
        AsmWriteCr4 (Value);
        break;
      default:
        break;
      }
      break;
    //
    // The specified register is Model Specific Register
    //
    case Msr:
      //
      // If this function is called to restore register setting after INIT signal,
      // there is no need to restore MSRs in register table.
      //
      if (RegisterTableEntry->ValidBitLength >= 64) {
        //
        // If length is not less than 64 bits, then directly write without reading
        //
        AsmWriteMsr64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->Value
          );
      } else {
        //
        // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
        // to make sure MSR read/write operation is atomic.
        //
        MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
        ASSERT (MsrSpinLock != NULL);
        AcquireSpinLock (MsrSpinLock);
        //
        // Set the bit section according to bit start and length
        //
        AsmMsrBitFieldWrite64 (
          RegisterTableEntry->Index,
          RegisterTableEntry->ValidBitStart,
          RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
          RegisterTableEntry->Value
          );
        ReleaseSpinLock (MsrSpinLock);
      }
      break;
    //
    // MemoryMapped operations
    //
    case MemoryMapped:
      AcquireSpinLock (mMemoryMappedLock);
      MmioBitFieldWrite32 (
        RegisterTableEntry->Index,
        RegisterTableEntry->ValidBitStart,
        RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
        (UINT32)RegisterTableEntry->Value
        );
      ReleaseSpinLock (mMemoryMappedLock);
      break;
    //
    // Enable or disable cache
    //
    case CacheControl:
      //
      // If value of the entry is 0, then disable cache.  Otherwise, enable cache.
      //
      if (RegisterTableEntry->Value == 0) {
        AsmDisableCache ();
      } else {
        AsmEnableCache ();
      }
      break;

    default:
      break;
    }
  }
}
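
//
// Illustrative example (hypothetical values, not data defined in this file):
// a register table entry that sets a single bit of CR4 for the calling
// processor could be encoded as follows; the normal boot path is responsible
// for building these tables.
//
//   CPU_REGISTER_TABLE_ENTRY  Entry;
//
//   Entry.RegisterType   = ControlRegister;
//   Entry.Index          = 4;    // CR4
//   Entry.ValidBitStart  = 9;    // e.g. CR4.OSFXSR
//   Entry.ValidBitLength = 1;
//   Entry.Value          = 1;
//
// MSR entries with ValidBitLength < 64 take the spin-lock path above, so the
// read-modify-write on shared-scope MSRs stays atomic across threads.
//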

/**
  AP initialization before SMBASE relocation in the S3 boot path.
**/
VOID
EarlyMPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  AP initialization after SMBASE relocation in the S3 boot path.
**/
VOID
MPRendezvousProcedure (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  ProgramVirtualWireMode ();
  DisableLvtInterrupts ();

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  //
  // Count down the number with lock mechanism.
  //
  InterlockedDecrement (&mNumberToFinish);
}

/**
  Prepares startup vector for APs.

  This function prepares startup vector for APs.

  @param  WorkingBuffer  The address of the work buffer.
**/
VOID
PrepareApStartupVector (
  EFI_PHYSICAL_ADDRESS  WorkingBuffer
  )
{
  EFI_PHYSICAL_ADDRESS                        StartupVector;
  MP_ASSEMBLY_ADDRESS_MAP                     AddressMap;

  //
  // Get the address map of startup code for AP,
  // including code size, and offset of long jump instructions to redirect.
  //
  ZeroMem (&AddressMap, sizeof (AddressMap));
  AsmGetAddressMap (&AddressMap);

  StartupVector = WorkingBuffer;

  //
  // Copy AP startup code to startup vector, and then redirect the long jump
  // instructions for mode switching.  The constant offsets (+3 and +2) locate
  // the 32-bit target operand inside the respective far-jump instructions
  // emitted by the module's assembly code, past their prefix/opcode bytes.
  //
  CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
  *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
  if (AddressMap.LongJumpOffset != 0) {
    *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
  }

  //
  // Get the start address of exchange data between BSP and AP.
  //
  mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
  ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));

  CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
  CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));

  //
  // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory
  //
  CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1);
  CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1);
  CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize);

  mExchangeInfo->StackStart  = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->StackSize   = mAcpiCpuData.StackSize;
  mExchangeInfo->BufferStart = (UINT32) StartupVector;
  mExchangeInfo->Cr3         = (UINT32) (AsmReadCr3 ());
}
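
//
// Resulting layout of the AP working buffer (sketch, below 1 MB):
//
//   StartupVector                    +--------------------------+
//                                    | AP rendezvous code copy  |
//   StartupVector + AddressMap.Size  +--------------------------+
//                                    | MP_CPU_EXCHANGE_INFO     |
//                                    +--------------------------+
//
// The assembly startup code locates the exchange data immediately after its
// own image, which is why mExchangeInfo is placed at StartupVector + Size.
//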

/**
  The function is invoked before SMBASE relocation in the S3 boot path to
  restore CPU status.

  The function is invoked before SMBASE relocation in the S3 boot path.  It
  performs the first-time microcode load and restores MTRRs for both BSP
  and APs.

**/
VOID
EarlyInitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  LoadMtrrData (mAcpiCpuData.MtrrTable);

  //
  // Find processor number for this CPU.
  //
  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  ProgramVirtualWireMode ();

  PrepareApStartupVector (mAcpiCpuData.StartupVector);

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}

/**
  The function is invoked after SMBASE relocation in the S3 boot path to
  restore CPU status.

  The function is invoked after SMBASE relocation in the S3 boot path.  It
  restores configuration according to data saved by the normal boot path for
  both BSP and APs.

**/
VOID
InitializeCpu (
  VOID
  )
{
  CPU_REGISTER_TABLE         *RegisterTableList;
  UINT32                     InitApicId;
  UINTN                      Index;

  RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable;
  InitApicId = GetInitialApicId ();
  for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
    if (RegisterTableList[Index].InitialApicId == InitApicId) {
      SetProcessorRegister (&RegisterTableList[Index]);
      break;
    }
  }

  mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
  //
  // StackStart was updated when APs were woken up in EarlyInitializeCpu;
  // re-initialize StackAddress to the original beginning address.
  //
  mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
  mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure;

  //
  // Send INIT IPI - SIPI to all APs
  //
  SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);

  while (mNumberToFinish > 0) {
    CpuPause ();
  }
}