//
// Source: mirror_edk2.git (git.proxmox.com) — UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
// Commit subject: "UefiCpuPkg/PiSmmCpuDxeSmm: Add paging protection."
//
1 /** @file
2 SMM CPU misc functions for Ia32 arch specific.
3
4 Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>
5 This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "PiSmmCpuDxeSmm.h"
16
17 extern UINT64 gTaskGateDescriptor;
18
19 EFI_PHYSICAL_ADDRESS mGdtBuffer;
20 UINTN mGdtBufferSize;
21
22 /**
23 Initialize IDT for SMM Stack Guard.
24
25 **/
26 VOID
27 EFIAPI
28 InitializeIDTSmmStackGuard (
29 VOID
30 )
31 {
32 IA32_IDT_GATE_DESCRIPTOR *IdtGate;
33
34 //
35 // If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
36 // is a Task Gate Descriptor so that when a Page Fault Exception occurs,
37 // the processors can use a known good stack in case stack is ran out.
38 //
39 IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
40 IdtGate += EXCEPT_IA32_PAGE_FAULT;
41 IdtGate->Uint64 = gTaskGateDescriptor;
42 }
43
/**
  Initialize Gdt for all processors.

  Builds one private copy of the GDT per logical processor.  When SMM Stack
  Guard is enabled each copy is followed by two TSS segments whose GDT
  descriptors are fixed up to point at the per-CPU TSS bases.

  @param[in]  Cr3          CR3 value.
  @param[out] GdtStepSize  The step size for GDT table.

  @return GdtBase for processor 0.
          GdtBase for processor X is: GdtBase + (GdtStepSize * X)
**/
VOID *
InitGdt (
  IN UINTN Cr3,
  OUT UINTN *GdtStepSize
  )
{
  UINTN Index;
  IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
  UINTN TssBase;
  UINTN GdtTssTableSize;
  UINT8 *GdtTssTables;
  UINTN GdtTableStepSize;

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.
    // in this case, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention
    // on each SMI entry.
    //

    //
    // Enlarge GDT to contain 2 TSS descriptors
    //
    // NOTE: this permanently grows the global gcSmiGdtr.Limit; every use of
    // gcSmiGdtr.Limit below already sees the enlarged value.
    //
    gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));

    //
    // Per-CPU layout is [GDT | TSS 1 | TSS 2], rounded up so each CPU's
    // copy starts on an 8-byte boundary.
    //
    GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
    mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    //
    // Allocated via AllocateCodePages so the buffer lies in page-backed
    // memory covered by this driver's SMM paging protection.
    //
    GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
    ASSERT (GdtTssTables != NULL);
    mGdtBuffer = (UINTN)GdtTssTables;
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      //
      // gcSmiGdtr.Base is assumed to already hold template GDT + 2 TSS
      // images back-to-back, since the copy length covers both.
      //
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);
      //
      // Fixup TSS descriptors
      //
      // TssBase is the first byte past this CPU's GDT copy, i.e. the base
      // of the first TSS; the matching descriptors are the last two GDT
      // entries, reached by stepping back two IA32_SEGMENT_DESCRIPTORs.
      //
      TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
      GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);

      //
      // Advance to the second TSS and its descriptor, then split its base
      // address the same way.
      //
      TssBase += TSS_SIZE;
      GdtDescriptor++;
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
      //
      // Fixup TSS segments
      //
      // ESP as known good stack
      //
      // NOTE(review): ESP is set one EFI_PAGE_SIZE above this CPU's stack
      // base — presumably to skip a guard page at the bottom of the stack;
      // confirm against the SMI stack initialization code.
      //
      *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
      //
      // Implicit narrowing of Cr3 (UINTN) to the 32-bit CR3 field is
      // lossless here: UINTN is 32-bit on IA32.
      //
      *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;
    }
  } else {
    //
    // Just use original table, AllocatePage and copy them here to make sure GDTs are covered in page memory.
    //
    // No TSS fixup needed in this branch: each CPU gets an identical copy
    // of the original GDT.
    //
    GdtTssTableSize = gcSmiGdtr.Limit + 1;
    mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
    GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
    ASSERT (GdtTssTables != NULL);
    mGdtBuffer = (UINTN)GdtTssTables;
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1);
    }
  }

  *GdtStepSize = GdtTableStepSize;
  return GdtTssTables;
}
128
129 /**
130 Transfer AP to safe hlt-loop after it finished restore CPU features on S3 patch.
131
132 @param[in] ApHltLoopCode The 32-bit address of the safe hlt-loop function.
133 @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
134 @param[in] NumberToFinish Semaphore of APs finish count.
135
136 **/
137 VOID
138 TransferApToSafeState (
139 IN UINT32 ApHltLoopCode,
140 IN UINT32 TopOfStack,
141 IN UINT32 *NumberToFinish
142 )
143 {
144 SwitchStack (
145 (SWITCH_STACK_ENTRY_POINT) (UINTN) ApHltLoopCode,
146 NumberToFinish,
147 NULL,
148 (VOID *) (UINTN) TopOfStack
149 );
150 //
151 // It should never reach here
152 //
153 ASSERT (FALSE);
154 }