# NOTE(review): this file is a "Commit | Line | Data" blame-table export of
# SmiEntry.S (export metadata: commit 09119a00, author MK).  Every line below
# is wrapped as "<line-number> | <content>\r | |" and must be unwrapped to
# recover the buildable source; the original license header follows.
1 | #------------------------------------------------------------------------------\r |
2 | #\r | |
3 | # Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r | |
4 | # This program and the accompanying materials\r | |
5 | # are licensed and made available under the terms and conditions of the BSD License\r | |
6 | # which accompanies this distribution. The full text of the license may be found at\r | |
7 | # http://opensource.org/licenses/bsd-license.php.\r | |
8 | #\r | |
9 | # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r | |
10 | # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r | |
11 | #\r | |
12 | # Module Name:\r | |
13 | #\r | |
14 | # SmiEntry.S\r | |
15 | #\r | |
16 | # Abstract:\r | |
17 | #\r | |
18 | # Code template of the SMI handler for a particular processor\r | |
19 | #\r | |
20 | #------------------------------------------------------------------------------\r | |
21 | \r | |
# Symbols shared with the C side: the handler template, its size/offset
# words (emitted at the bottom of this file), the patchable fields
# (CR3, SMI stack, SMBASE, XD-support flag) and the IDTR image the
# handler loads.  The .space/.byte slots carrying these names inside the
# template are presumably patched by C code before the template is copied
# into SMRAM -- TODO confirm against the PiSmmCpuDxeSmm C sources.
22 | ASM_GLOBAL ASM_PFX(gcStmSmiHandlerTemplate)\r | |
23 | ASM_GLOBAL ASM_PFX(gcStmSmiHandlerSize)\r | |
24 | ASM_GLOBAL ASM_PFX(gcStmSmiHandlerOffset)\r | |
25 | ASM_GLOBAL ASM_PFX(gStmSmiCr3)\r | |
26 | ASM_GLOBAL ASM_PFX(gStmSmiStack)\r | |
27 | ASM_GLOBAL ASM_PFX(gStmSmbase)\r | |
28 | ASM_GLOBAL ASM_PFX(gStmXdSupported)\r | |
29 | ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))\r | |
30 | ASM_GLOBAL ASM_PFX(gStmSmiHandlerIdtr)\r | |
31 | \r | |
# Architectural MSR numbers/bits used below (Intel SDM): IA32_MISC_ENABLE
# (bit 34 of the 64-bit MSR = XD Bit Disable) and IA32_EFER (bit 11 = NXE).
32 | .equ MSR_IA32_MISC_ENABLE, 0x1A0\r | |
33 | .equ MSR_EFER, 0xc0000080\r | |
34 | .equ MSR_EFER_XD, 0x800\r | |
35 | \r | |
36 | #\r | |
37 | # Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR\r | |
38 | #\r | |
# DSC_OFFSET is applied relative to SMBASE (held in %edi by the handler);
# the remaining values are byte offsets of fields within that descriptor
# (GDT pointer/size and the CS/DS/SS/other segment selectors).
39 | .equ DSC_OFFSET, 0xfb00\r | |
40 | .equ DSC_GDTPTR, 0x48\r | |
41 | .equ DSC_GDTSIZ, 0x50\r | |
42 | .equ DSC_CS, 0x14\r | |
43 | .equ DSC_DS, 0x16\r | |
44 | .equ DSC_SS, 0x18\r | |
45 | .equ DSC_OTHERSEG, 0x1A\r | |
46 | \r | |
# Selectors within the GDT installed by the template below.
47 | .equ PROTECT_MODE_CS, 0x08\r | |
48 | .equ PROTECT_MODE_DS, 0x20\r | |
49 | .equ TSS_SEGMENT, 0x40\r | |
51 | .text\r | |
52 | ASM_PFX(gcStmSmiHandlerTemplate):\r | |
53 | \r | |
#
# NOTE(review): this template is copied into SMRAM and executed there; the
# "+ 0x8000" label arithmetic assumes execution starts at SMBASE + 0x8000
# (the architectural SMI entry offset) -- confirm against the installing C
# code.  The CPU enters in 16-bit real mode, so everything up to the far
# jump is hand-encoded with .byte/.word or written with 32-bit mnemonics
# whose 16-bit decoding is given in the trailing "# mov ..." comments
# (e.g. a %edi-based operand here actually encodes [bx] in real mode).
# Do NOT reorder or resize instructions: operands are patched in place.
#
54 | _StmSmiEntryPoint:\r | |
# bx := 16-bit offset of _StmGdtDesc within the SMBASE segment.
55 | .byte 0xbb # mov bx, imm16\r |
56 | .word _StmGdtDesc - _StmSmiEntryPoint + 0x8000\r | |
# Fetch GDT size from the TXT descriptor; store (size - 1) as the limit
# word of the pseudo-descriptor at [bx].
57 | .byte 0x2e,0xa1 # mov ax, cs:[offset16]\r | |
58 | .word DSC_OFFSET + DSC_GDTSIZ\r | |
59 | decl %eax\r | |
60 | movl %eax, %cs:(%edi) # mov cs:[bx], ax\r | |
# Fetch GDT base from the TXT descriptor; store it at [bx+2] and keep it
# in bp for the TSS fix-up further down.
61 | .byte 0x66,0x2e,0xa1 # mov eax, cs:[offset16]\r | |
62 | .word DSC_OFFSET + DSC_GDTPTR\r | |
63 | movw %ax, %cs:2(%edi)\r | |
64 | movw %ax, %bp # ebp = GDT base\r | |
65 | .byte 0x66\r | |
66 | lgdt %cs:(%edi)\r | |
67 | # Patch ProtectedMode Segment\r | |
# Patch the far-jump selector (the word just below _StmGdtDesc, at bx-2).
68 | .byte 0xb8 # mov ax, imm16\r | |
69 | .word PROTECT_MODE_CS # set AX for segment directly\r | |
70 | movl %eax, %cs:-2(%edi) # mov cs:[bx - 2], ax\r | |
71 | # Patch ProtectedMode entry\r | |
# gStmSmbase is a patchable 4-byte slot: edi := SMBASE, used both for the
# jump-target fix-up here and as the descriptor base after mode switch.
72 | .byte 0x66, 0xbf # mov edi, SMBASE\r | |
73 | ASM_PFX(gStmSmbase): .space 4\r | |
74 | .byte 0x67\r | |
75 | lea ((Start32bit - _StmSmiEntryPoint) + 0x8000)(%edi), %ax\r | |
76 | movw %ax, %cs:-6(%edi)\r | |
# Switch to protected mode: mask CD/NW/AM/WP/TS/EM out of CR0
# (0x9ffafff3), then set PE|MP|NE (0x23).
77 | movl %cr0, %ebx\r | |
78 | .byte 0x66\r | |
79 | andl $0x9ffafff3, %ebx\r | |
80 | .byte 0x66\r | |
81 | orl $0x23, %ebx\r | |
82 | movl %ebx, %cr0\r | |
# Hand-encoded far jump (0x66,0xea = ljmpl): the 4-byte offset and 2-byte
# selector that follow were patched above with Start32bit/PROTECT_MODE_CS.
83 | .byte 0x66,0xea\r | |
84 | .space 4\r | |
85 | .space 2\r | |
# GDT pseudo-descriptor (limit word at [bx], base at [bx+2]) filled in
# above and loaded with lgdt.
86 | _StmGdtDesc: .space 4\r | |
87 | .space 2\r | |
88 | \r | |
# First 32-bit protected-mode code: load flat data selectors, switch to
# the per-CPU SMI stack (patched at gStmSmiStack), and install the IDT.
89 | Start32bit:\r | |
90 | movw $PROTECT_MODE_DS, %ax\r | |
91 | movl %eax,%ds\r | |
92 | movl %eax,%es\r | |
93 | movl %eax,%fs\r | |
94 | movl %eax,%gs\r | |
95 | movl %eax,%ss\r | |
96 | .byte 0xbc # mov esp, imm32\r | |
97 | ASM_PFX(gStmSmiStack): .space 4\r | |
98 | movl $ASM_PFX(gStmSmiHandlerIdtr), %eax\r | |
99 | lidt (%eax)\r | |
100 | jmp ProtFlatMode\r | |
101 | \r | |
# Load the SMM page-table root (patched at gStmSmiCr3) and enable the CR4
# features this CPU reports via CPUID leaf 1.
102 | ProtFlatMode:\r | |
103 | .byte 0xb8 # mov eax, imm32\r | |
104 | ASM_PFX(gStmSmiCr3): .space 4\r | |
105 | movl %eax, %cr3\r | |
106 | #\r | |
107 | # Need to test for CR4 specific bit support\r | |
108 | #\r | |
109 | movl $1, %eax\r | |
110 | cpuid # use CPUID to determine if specific CR4 bits are supported\r | |
111 | xorl %eax, %eax # Clear EAX\r | |
112 | testl $BIT2, %edx # Check for DE capabilities\r | |
113 | jz L8\r | |
114 | orl $BIT3, %eax\r | |
115 | L8:\r | |
116 | testl $BIT6, %edx # Check for PAE capabilities\r | |
117 | jz L9\r | |
118 | orl $BIT5, %eax\r | |
119 | L9:\r | |
120 | testl $BIT7, %edx # Check for MCE capabilities\r | |
121 | jz L10\r | |
122 | orl $BIT6, %eax\r | |
123 | L10:\r | |
124 | testl $BIT24, %edx # Check for FXSR capabilities\r | |
125 | jz L11\r | |
126 | orl $BIT9, %eax\r | |
127 | L11:\r | |
128 | testl $BIT25, %edx # Check for SSE capabilities\r | |
129 | jz L12\r | |
130 | orl $BIT10, %eax\r | |
131 | L12: # as cr4.PGE is not set here, refresh cr3\r | |
132 | movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.\r | |
133 | \r | |
# If the SMM stack-guard PCD is enabled, install the TSS so a stack fault
# can switch stacks.  The busy flag in the TSS descriptor (type byte at
# GDT base + TSS_SEGMENT + 5) is reset first because ltr faults on a busy
# TSS; %ebp still holds the GDT base from the real-mode setup above.
134 | cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))\r | |
135 | jz L5\r | |
136 | # Load TSS\r | |
137 | movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag\r | |
138 | movl $TSS_SEGMENT, %eax\r | |
139 | ltrw %ax\r | |
140 | L5:\r | |
141 | \r | |
# gStmXdSupported is a patchable byte.  When non-zero: temporarily clear
# the firmware "XD disable" bit in IA32_MISC_ENABLE (saving the MSR's high
# dword on the stack for CommonHandler to inspect/restore) and set
# EFER.NXE.  When zero: reserve a matching 4-byte slot so the stack layout
# reaching CommonHandler is identical on both paths.
142 | # enable NXE if supported\r | |
143 | .byte 0xb0 # mov al, imm8\r | |
144 | ASM_PFX(gStmXdSupported): .byte 1\r | |
145 | cmpb $0, %al\r | |
146 | jz SkipXd\r | |
147 | #\r | |
148 | # Check XD disable bit\r | |
149 | #\r | |
150 | movl $MSR_IA32_MISC_ENABLE, %ecx\r | |
151 | rdmsr\r | |
152 | pushl %edx # save MSR_IA32_MISC_ENABLE[63-32]\r | |
153 | testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]\r | |
154 | jz L13\r | |
155 | andw $0x0FFFB, %dx # clear XD Disable bit if it is set\r | |
156 | wrmsr\r | |
157 | L13:\r | |
158 | movl $MSR_EFER, %ecx\r | |
159 | rdmsr\r | |
160 | orw $MSR_EFER_XD,%ax # enable NXE\r | |
161 | wrmsr\r | |
162 | jmp XdDone\r | |
163 | SkipXd:\r | |
164 | subl $4, %esp\r | |
165 | XdDone:\r | |
166 | \r | |
# Enable paging (CR3 was loaded above) together with WP|NE|MP|PE, then
# load the SMM data/stack selectors from the TXT descriptor located at
# SMBASE + DSC_OFFSET (%edi still holds SMBASE).
167 | movl %cr0, %ebx\r | |
168 | orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE\r | |
169 | movl %ebx, %cr0\r | |
170 | leal DSC_OFFSET(%edi),%ebx\r | |
171 | movw DSC_DS(%ebx),%ax\r | |
172 | movl %eax, %ds\r | |
173 | movw DSC_OTHERSEG(%ebx),%ax\r | |
174 | movl %eax, %es\r | |
175 | movl %eax, %fs\r | |
176 | movl %eax, %gs\r | |
177 | movw DSC_SS(%ebx),%ax\r | |
178 | movl %eax, %ss\r | |
179 | \r | |
# Shared tail for both the normal and the STM entry path.  4(%esp) is the
# value above the saved-XD dword -- presumably the CpuIndex placed on the
# SMI stack by the C setup; confirm against the caller.  It is passed to
# CpuSmmDebugEntry, SmiRendezvous and CpuSmmDebugExit; the calls go
# through a register since the template runs at its copied address, not
# its link address, so a direct relative call would resolve incorrectly.
180 | CommonHandler:\r | |
181 | movl 4(%esp), %ebx\r | |
182 | \r | |
183 | pushl %ebx\r | |
184 | movl $ASM_PFX(CpuSmmDebugEntry), %eax\r | |
185 | call *%eax\r | |
186 | addl $4, %esp\r | |
187 | \r | |
188 | pushl %ebx\r | |
189 | movl $ASM_PFX(SmiRendezvous), %eax\r | |
190 | call *%eax\r | |
191 | addl $4, %esp\r | |
192 | \r | |
193 | pushl %ebx\r | |
194 | movl $ASM_PFX(CpuSmmDebugExit), %eax\r | |
195 | call *%eax\r | |
196 | addl $4, %esp\r | |
197 | \r | |
# Restore the firmware XD-disable bit if it was set on entry (the saved
# high dword of IA32_MISC_ENABLE was pushed in the XD setup above), then
# leave SMM.
198 | movl $ASM_PFX(gStmXdSupported), %eax\r | |
199 | movb (%eax), %al\r | |
200 | cmpb $0, %al\r | |
201 | jz L16\r | |
202 | popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]\r | |
203 | testl $BIT2, %edx\r | |
204 | jz L16\r | |
205 | movl $MSR_IA32_MISC_ENABLE, %ecx\r | |
206 | rdmsr\r | |
207 | orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM\r | |
208 | wrmsr\r | |
209 | \r | |
210 | L16:\r | |
211 | rsm\r | |
212 | \r | |
#
# STM-mediated entry point (gcStmSmiHandlerOffset below points here).
# When an STM is active it hands the SMI over already in protected mode,
# so the real-mode setup at _StmSmiEntryPoint is bypassed and IDT, CR0
# and CR4 must be (re)programmed here before joining CommonHandler.
#
213 | _StmSmiHandler:\r | |
214 | #\r | |
215 | # Check XD disable bit\r | |
216 | #\r | |
# %esi carries the saved IA32_MISC_ENABLE[63:32] (0 when XD is
# unsupported); it is pushed below so CommonHandler's popl finds the same
# stack layout as on the non-STM path.
217 | xorl %esi, %esi\r | |
218 | movl $ASM_PFX(gStmXdSupported), %eax\r | |
219 | movb (%eax), %al\r | |
220 | cmpb $0, %al\r | |
221 | jz StmXdDone\r | |
222 | movl $MSR_IA32_MISC_ENABLE, %ecx\r | |
223 | rdmsr\r | |
224 | movl %edx, %esi # save MSR_IA32_MISC_ENABLE[63-32]\r | |
225 | testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]\r | |
226 | jz L14\r | |
227 | andw $0x0FFFB, %dx # clear XD Disable bit if it is set\r | |
228 | wrmsr\r | |
229 | L14:\r | |
230 | movl $MSR_EFER, %ecx\r | |
231 | rdmsr\r | |
232 | orw $MSR_EFER_XD,%ax # enable NXE\r | |
233 | wrmsr\r | |
234 | StmXdDone:\r | |
235 | push %esi\r | |
236 | \r | |
237 | # below step is needed, because STM does not run above code.\r | |
238 | # we have to run below code to set IDT/CR0/CR4\r | |
239 | movl $ASM_PFX(gStmSmiHandlerIdtr), %eax\r | |
240 | lidt (%eax)\r | |
241 | \r | |
242 | movl %cr0, %eax\r | |
243 | orl $0x80010023, %eax # enable paging + WP + NE + MP + PE\r | |
244 | movl %eax, %cr0\r | |
245 | #\r | |
246 | # Need to test for CR4 specific bit support\r | |
247 | #\r | |
# Same CR4 capability walk as the non-STM path, except it ORs into the
# current CR4 value instead of starting from zero.
248 | movl $1, %eax\r | |
249 | cpuid # use CPUID to determine if specific CR4 bits are supported\r | |
250 | movl %cr4, %eax # init EAX\r | |
251 | testl $BIT2, %edx # Check for DE capabilities\r | |
252 | jz L28\r | |
253 | orl $BIT3, %eax\r | |
254 | L28:\r | |
255 | testl $BIT6, %edx # Check for PAE capabilities\r | |
256 | jz L29\r | |
257 | orl $BIT5, %eax\r | |
258 | L29:\r | |
259 | testl $BIT7, %edx # Check for MCE capabilities\r | |
260 | jz L30\r | |
261 | orl $BIT6, %eax\r | |
262 | L30:\r | |
263 | testl $BIT24, %edx # Check for FXSR capabilities\r | |
264 | jz L31\r | |
265 | orl $BIT9, %eax\r | |
266 | L31:\r | |
267 | testl $BIT25, %edx # Check for SSE capabilities\r | |
268 | jz L32\r | |
269 | orl $BIT10, %eax\r | |
270 | L32: # as cr4.PGE is not set here, refresh cr3\r | |
271 | movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.\r | |
272 | # STM init finish\r | |
273 | jmp CommonHandler\r | |
274 | \r | |
275 | \r | |
# Values exported to C: total byte size of the template and the offset of
# the STM entry point within it, both as 16-bit words.
276 | ASM_PFX(gcStmSmiHandlerSize) : .word . - _StmSmiEntryPoint\r | |
277 | ASM_PFX(gcStmSmiHandlerOffset): .word _StmSmiHandler - _StmSmiEntryPoint\r | |
278 | \r |