]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
UefiCpuPkg/PiSmmCpuDxeSmm: patch "gSmiStack" with PatchInstructionX86()
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / Ia32 / SmmFuncsArch.c
1 /** @file
2 SMM CPU misc functions for Ia32 arch specific.
3
4 Copyright (c) 2015 - 2018, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
//
// Task Gate Descriptor value installed into the page-fault IDT entry by
// InitializeIDTSmmStackGuard(); defined in arch-specific assembly/code.
//
extern UINT64 gTaskGateDescriptor;

//
// Base address and total size of the per-CPU GDT/TSS buffer allocated by
// InitGdt(); kept at file scope so other parts of the driver can locate it.
//
EFI_PHYSICAL_ADDRESS mGdtBuffer;
UINTN mGdtBufferSize;
21
22 /**
23 Initialize IDT for SMM Stack Guard.
24
25 **/
26 VOID
27 EFIAPI
28 InitializeIDTSmmStackGuard (
29 VOID
30 )
31 {
32 IA32_IDT_GATE_DESCRIPTOR *IdtGate;
33
34 //
35 // If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
36 // is a Task Gate Descriptor so that when a Page Fault Exception occurs,
37 // the processors can use a known good stack in case stack is ran out.
38 //
39 IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
40 IdtGate += EXCEPT_IA32_PAGE_FAULT;
41 IdtGate->Uint64 = gTaskGateDescriptor;
42 }
43
/**
  Initialize Gdt for all processors.

  Allocates one GDT image per CPU (optionally followed by two TSS segments
  when SMM Stack Guard is enabled) and returns the base of the first copy.

  @param[in]  Cr3          CR3 value, stored into each TSS so the page-fault
                           task runs with the SMM page tables.
  @param[out] GdtStepSize  The step size for GDT table.

  @return GdtBase for processor 0.
          GdtBase for processor X is: GdtBase + (GdtStepSize * X)
**/
VOID *
InitGdt (
  IN UINTN Cr3,
  OUT UINTN *GdtStepSize
  )
{
  UINTN Index;
  IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
  UINTN TssBase;
  UINTN GdtTssTableSize;
  UINT8 *GdtTssTables;
  UINTN GdtTableStepSize;

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.
    // in this case, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention
    // on each SMI entry.
    //

    //
    // Enlarge GDT to contain 2 TSS descriptors (they become the last two
    // entries of every per-CPU GDT copy).
    //
    gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));

    //
    // Per-CPU layout: [GDT (Limit+1 bytes)][TSS 0][TSS 1], rounded up so
    // each CPU's table starts 8-byte aligned.
    //
    GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
    mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    //
    // IA32 Stack Guard need use task switch to switch stack that need
    // write GDT and TSS, so AllocateCodePages() could not be used here
    // as code pages will be set to RO.
    //
    GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
    ASSERT (GdtTssTables != NULL);
    mGdtBuffer = (UINTN)GdtTssTables;
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      //
      // Copy the template GDT plus the two template TSS segments that
      // follow it at gcSmiGdtr.Base.
      //
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);
      //
      // Fixup TSS descriptors: the first TSS sits immediately after this
      // CPU's GDT, and its two descriptors are the last two GDT entries
      // (hence the "- 2" below). Patch each descriptor's 32-bit base.
      //
      TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
      GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);

      // Second TSS (used by the page-fault task gate) follows the first.
      TssBase += TSS_SIZE;
      GdtDescriptor++;
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
      //
      // Fixup TSS segments
      //
      // ESP as known good stack
      //
      // NOTE(review): ESP points EFI_PAGE_SIZE above this CPU's stack base —
      // presumably to skip a guard page at the bottom of the stack; confirm
      // against the stack layout set up elsewhere in this driver.
      //
      *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
      *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;
    }
  } else {
    //
    // Just use original table, AllocatePage and copy them here to make sure GDTs are covered in page memory.
    //
    GdtTssTableSize = gcSmiGdtr.Limit + 1;
    mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    //
    // No TSS writes needed here, so code pages (read-only after protection
    // is applied) are acceptable.
    //
    GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
    ASSERT (GdtTssTables != NULL);
    mGdtBuffer = (UINTN)GdtTssTables;
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1);
    }
  }

  *GdtStepSize = GdtTableStepSize;
  return GdtTssTables;
}
133
134 /**
135 Transfer AP to safe hlt-loop after it finished restore CPU features on S3 patch.
136
137 @param[in] ApHltLoopCode The address of the safe hlt-loop function.
138 @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
139 @param[in] NumberToFinishAddress Address of Semaphore of APs finish count.
140
141 **/
142 VOID
143 TransferApToSafeState (
144 IN UINTN ApHltLoopCode,
145 IN UINTN TopOfStack,
146 IN UINTN NumberToFinishAddress
147 )
148 {
149 SwitchStack (
150 (SWITCH_STACK_ENTRY_POINT)ApHltLoopCode,
151 (VOID *)NumberToFinishAddress,
152 NULL,
153 (VOID *)TopOfStack
154 );
155 //
156 // It should never reach here
157 //
158 ASSERT (FALSE);
159 }