/** @file
SMM CPU misc functions for Ia32 arch specific.

Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/
15 #include "PiSmmCpuDxeSmm.h"
17 extern UINT64 gTaskGateDescriptor
;
19 EFI_PHYSICAL_ADDRESS mGdtBuffer
;
23 Initialize IDT for SMM Stack Guard.
28 InitializeIDTSmmStackGuard (
32 IA32_IDT_GATE_DESCRIPTOR
*IdtGate
;
35 // If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
36 // is a Task Gate Descriptor so that when a Page Fault Exception occurs,
37 // the processors can use a known good stack in case stack is ran out.
39 IdtGate
= (IA32_IDT_GATE_DESCRIPTOR
*)gcSmiIdtr
.Base
;
40 IdtGate
+= EXCEPT_IA32_PAGE_FAULT
;
41 IdtGate
->Uint64
= gTaskGateDescriptor
;
45 Initialize Gdt for all processors.
47 @param[in] Cr3 CR3 value.
48 @param[out] GdtStepSize The step size for GDT table.
50 @return GdtBase for processor 0.
51 GdtBase for processor X is: GdtBase + (GdtStepSize * X)
56 OUT UINTN
*GdtStepSize
60 IA32_SEGMENT_DESCRIPTOR
*GdtDescriptor
;
62 UINTN GdtTssTableSize
;
64 UINTN GdtTableStepSize
;
66 if (FeaturePcdGet (PcdCpuSmmStackGuard
)) {
68 // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.
69 // in this case, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention
74 // Enlarge GDT to contain 2 TSS descriptors
76 gcSmiGdtr
.Limit
+= (UINT16
)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR
));
78 GdtTssTableSize
= (gcSmiGdtr
.Limit
+ 1 + TSS_SIZE
* 2 + 7) & ~7; // 8 bytes aligned
79 mGdtBufferSize
= GdtTssTableSize
* gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
80 GdtTssTables
= (UINT8
*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize
));
81 ASSERT (GdtTssTables
!= NULL
);
82 mGdtBuffer
= (UINTN
)GdtTssTables
;
83 GdtTableStepSize
= GdtTssTableSize
;
85 for (Index
= 0; Index
< gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
; Index
++) {
86 CopyMem (GdtTssTables
+ GdtTableStepSize
* Index
, (VOID
*)(UINTN
)gcSmiGdtr
.Base
, gcSmiGdtr
.Limit
+ 1 + TSS_SIZE
* 2);
88 // Fixup TSS descriptors
90 TssBase
= (UINTN
)(GdtTssTables
+ GdtTableStepSize
* Index
+ gcSmiGdtr
.Limit
+ 1);
91 GdtDescriptor
= (IA32_SEGMENT_DESCRIPTOR
*)(TssBase
) - 2;
92 GdtDescriptor
->Bits
.BaseLow
= (UINT16
)TssBase
;
93 GdtDescriptor
->Bits
.BaseMid
= (UINT8
)(TssBase
>> 16);
94 GdtDescriptor
->Bits
.BaseHigh
= (UINT8
)(TssBase
>> 24);
98 GdtDescriptor
->Bits
.BaseLow
= (UINT16
)TssBase
;
99 GdtDescriptor
->Bits
.BaseMid
= (UINT8
)(TssBase
>> 16);
100 GdtDescriptor
->Bits
.BaseHigh
= (UINT8
)(TssBase
>> 24);
102 // Fixup TSS segments
104 // ESP as known good stack
106 *(UINTN
*)(TssBase
+ TSS_IA32_ESP_OFFSET
) = mSmmStackArrayBase
+ EFI_PAGE_SIZE
+ Index
* mSmmStackSize
;
107 *(UINT32
*)(TssBase
+ TSS_IA32_CR3_OFFSET
) = Cr3
;
111 // Just use original table, AllocatePage and copy them here to make sure GDTs are covered in page memory.
113 GdtTssTableSize
= gcSmiGdtr
.Limit
+ 1;
114 mGdtBufferSize
= GdtTssTableSize
* gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
;
115 GdtTssTables
= (UINT8
*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize
));
116 ASSERT (GdtTssTables
!= NULL
);
117 mGdtBuffer
= (UINTN
)GdtTssTables
;
118 GdtTableStepSize
= GdtTssTableSize
;
120 for (Index
= 0; Index
< gSmmCpuPrivate
->SmmCoreEntryContext
.NumberOfCpus
; Index
++) {
121 CopyMem (GdtTssTables
+ GdtTableStepSize
* Index
, (VOID
*)(UINTN
)gcSmiGdtr
.Base
, gcSmiGdtr
.Limit
+ 1);
125 *GdtStepSize
= GdtTableStepSize
;
130 Transfer AP to safe hlt-loop after it finished restore CPU features on S3 patch.
132 @param[in] ApHltLoopCode The 32-bit address of the safe hlt-loop function.
133 @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
134 @param[in] NumberToFinish Semaphore of APs finish count.
138 TransferApToSafeState (
139 IN UINT32 ApHltLoopCode
,
140 IN UINT32 TopOfStack
,
141 IN UINT32
*NumberToFinish
145 (SWITCH_STACK_ENTRY_POINT
) (UINTN
) ApHltLoopCode
,
148 (VOID
*) (UINTN
) TopOfStack
151 // It should never reach here