]>
Commit | Line | Data |
---|---|---|
1 | #------------------------------------------------------------------------------\r | |
2 | #\r | |
3 | # Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r | |
4 | # This program and the accompanying materials\r | |
5 | # are licensed and made available under the terms and conditions of the BSD License\r | |
6 | # which accompanies this distribution. The full text of the license may be found at\r | |
7 | # http://opensource.org/licenses/bsd-license.php.\r | |
8 | #\r | |
9 | # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r | |
10 | # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r | |
11 | #\r | |
12 | # Module Name:\r | |
13 | #\r | |
14 | # SmiEntry.S\r | |
15 | #\r | |
16 | # Abstract:\r | |
17 | #\r | |
18 | # Code template of the SMI handler for a particular processor\r | |
19 | #\r | |
20 | #------------------------------------------------------------------------------\r | |
21 | \r | |
#
# Exported symbols: the handler template and the runtime-patched fields
# inside it (SMBASE, stack pointer, CR3), plus feature flags consumed here.
#
ASM_GLOBAL  ASM_PFX(gcSmiHandlerTemplate)
ASM_GLOBAL  ASM_PFX(gcSmiHandlerSize)
ASM_GLOBAL  ASM_PFX(gSmiCr3)
ASM_GLOBAL  ASM_PFX(gSmiStack)
ASM_GLOBAL  ASM_PFX(gSmbase)
ASM_GLOBAL  ASM_PFX(mXdSupported)
ASM_GLOBAL  ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
ASM_GLOBAL  ASM_PFX(gSmiHandlerIdtr)

.equ        MSR_EFER, 0xc0000080            # IA32_EFER MSR
.equ        MSR_EFER_XD, 0x800              # IA32_EFER.NXE (bit 11): enable execute-disable

#
# Constants relating to PROCESSOR_SMM_DESCRIPTOR
# (byte offsets of fields within the per-CPU descriptor at SMBASE+DSC_OFFSET)
#
.equ        DSC_OFFSET, 0xfb00              # descriptor location within the SMRAM tile
.equ        DSC_GDTPTR, 0x30                # GDT base address field
.equ        DSC_GDTSIZ, 0x38                # GDT size field
.equ        DSC_CS, 14                      # code segment selector
.equ        DSC_DS, 16                      # data segment selector
.equ        DSC_SS, 18                      # stack segment selector
.equ        DSC_OTHERSEG, 20                # selector for ES/FS/GS

.equ        PROTECT_MODE_CS, 0x08           # flat 32-bit code selector
.equ        PROTECT_MODE_DS, 0x20           # flat 32-bit data selector
.equ        TSS_SEGMENT, 0x40               # TSS selector (used for stack-guard feature)

.text
50 | \r | |
ASM_PFX(gcSmiHandlerTemplate):

#
# The CPU enters SMM in 16-bit real mode at SMBASE+0x8000, but this file is
# assembled as 32-bit code.  The stub below is therefore hand-encoded with
# .byte/.word so the bytes the assembler emits decode as the intended 16-bit
# instructions; the 32-bit mnemonics in between rely on the modrm trick that
# a 32-bit (%edi) encoding decodes as [bx] in 16-bit mode, and on explicit
# 0x66/0x67 prefix bytes where 32-bit operands/addresses are needed.
#
_SmiEntryPoint:
    .byte 0xbb                              # mov bx, imm16
    .word _GdtDesc - _SmiEntryPoint + 0x8000 # bx = offset of _GdtDesc in this tile
    .byte 0x2e,0xa1                         # mov ax, cs:[offset16]
    .word DSC_OFFSET + DSC_GDTSIZ           # ax = GDT size from PROCESSOR_SMM_DESCRIPTOR
    decl    %eax                            # executes as "dec ax": limit = size - 1
    movl    %eax, %cs:(%edi)                # executes as "mov cs:[bx], ax": store GDT limit
    .byte 0x66,0x2e,0xa1                    # mov eax, cs:[offset16] (0x66 = 32-bit operand)
    .word DSC_OFFSET + DSC_GDTPTR           # eax = GDT base from the descriptor
    movw    %ax, %cs:2(%edi)                # 0x66-prefixed in 16-bit mode: stores eax at [bx+2] (GDT base)
    movw    %ax, %bp                        # ebp = GDT base (used later to clear the TSS busy flag)
    .byte 0x66
    lgdt    %cs:(%edi)                      # lgdt cs:[bx] — load the GDT descriptor built above
    # Patch ProtectedMode Segment (selector field of the far jump below)
    .byte 0xb8                              # mov ax, imm16
    .word PROTECT_MODE_CS                   # set AX for segment directly
    movl    %eax, %cs:-2(%edi)              # executes as "mov cs:[bx - 2], ax"
    # Patch ProtectedMode entry (offset field of the far jump below)
    .byte 0x66, 0xbf                        # mov edi, imm32 — SMBASE, patched at runtime
ASM_PFX(gSmbase): .space 4
    .byte 0x67                              # address-size override for the lea below
    lea     ((Start32bit - _SmiEntryPoint) + 0x8000)(%edi), %ax  # eax = flat address of Start32bit
    movw    %ax, %cs:-6(%edi)               # store 32-bit offset into the far-jump operand
    movl    %cr0, %ebx
    .byte 0x66
    andl    $0x9ffafff3, %ebx               # clear PG/CD/NW and other stale CR0 bits
    .byte 0x66
    orl     $0x23, %ebx                     # set PE, MP, NE — enter protected mode
    movl    %ebx, %cr0
    .byte 0x66,0xea                         # jmp far ptr16:32 (operands patched above)
    .space 4                                # -> offset32: Start32bit flat address
    .space 2                                # -> selector: PROTECT_MODE_CS
_GdtDesc:   .space 4                        # GDT limit (word) + low word of base, filled above
    .space 2                                # high word of GDT base
87 | \r | |
#
# First 32-bit protected-mode code: establish flat data segments, switch to
# the per-CPU SMM stack, and load the SMI handler IDT.
#
Start32bit:
    movw    $PROTECT_MODE_DS, %ax
    movl    %eax,%ds
    movl    %eax,%es
    movl    %eax,%fs
    movl    %eax,%gs
    movl    %eax,%ss
    .byte 0xbc                              # mov esp, imm32
ASM_PFX(gSmiStack): .space 4                # per-CPU SMM stack top, patched at runtime
    movl    $ASM_PFX(gSmiHandlerIdtr), %eax
    lidt    (%eax)                          # install the SMM exception IDT
    jmp     ProtFlatMode
100 | \r | |
#
# Flat protected mode: load the SMM page tables, enable only the CR4 feature
# bits this processor actually supports, and (when stack guard is enabled)
# load the task register for the stack-fault TSS.
#
ProtFlatMode:
    .byte 0xb8                              # mov eax, imm32
ASM_PFX(gSmiCr3): .space 4                  # SMM page-table root, patched at runtime
    movl    %eax, %cr3
#
# Need to test for CR4 specific bit support
#
    movl    $1, %eax
    cpuid                                   # use CPUID to determine if specific CR4 bits are supported
    xorl    %eax, %eax                      # Clear EAX — accumulate CR4 bits to set
    testl   $BIT2, %edx                     # Check for DE capabilities
    jz      L8
    orl     $BIT3, %eax                     # CR4.DE
L8:
    testl   $BIT6, %edx                     # Check for PAE capabilities
    jz      L9
    orl     $BIT5, %eax                     # CR4.PAE
L9:
    testl   $BIT7, %edx                     # Check for MCE capabilities
    jz      L10
    orl     $BIT6, %eax                     # CR4.MCE
L10:
    testl   $BIT24, %edx                    # Check for FXSR capabilities
    jz      L11
    orl     $BIT9, %eax                     # CR4.OSFXSR
L11:
    testl   $BIT25, %edx                    # Check for SSE capabilities
    jz      L12
    orl     $BIT10, %eax                    # CR4.OSXMMEXCPT
L12:                                        # as cr4.PGE is not set here, refresh cr3
    movl    %eax, %cr4                      # in PreModifyMtrrs() to flush TLB.

    cmpb    $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
    jz      L5
# Load TSS (stack-guard feature): descriptor sits in the GDT whose base is ebp
    movb    $0x89, (TSS_SEGMENT + 5)(%ebp)  # clear busy flag so ltr does not fault
    movl    $TSS_SEGMENT, %eax
    ltrw    %ax
L5:
140 | \r | |
#
# Enable NXE (execute-disable) if the platform reports XD support.
# On the supported path this saves MSR_IA32_MISC_ENABLE[63:32] on the stack
# so _SmiHandler can restore the XD-disable state before RSM; on the
# unsupported path it reserves a matching 4-byte slot so the stack layout
# (CpuIndex at 4(%esp)) is identical either way.
#
    .byte 0xb0                              # mov al, imm8
ASM_PFX(mXdSupported): .byte 1              # patched at runtime: 1 = XD supported
    cmpb    $0, %al
    jz      SkipNxe
#
# Check XD disable bit
#
    movl    $MSR_IA32_MISC_ENABLE, %ecx
    rdmsr
    pushl   %edx                            # save MSR_IA32_MISC_ENABLE[63-32]
    testl   $BIT2, %edx                     # MSR_IA32_MISC_ENABLE[34] (XD Disable)
    jz      L13
    andw    $0x0FFFB, %dx                   # clear XD Disable bit if it is set
    wrmsr
L13:
    movl    $MSR_EFER, %ecx
    rdmsr
    orw     $MSR_EFER_XD,%ax                # enable NXE
    wrmsr
    jmp     NxeDone                         # BUGFIX: must not fall through into SkipNxe's
                                            # "subl $4, %esp" after having already pushed %edx,
                                            # or ESP ends up 4 bytes low and _SmiHandler reads
                                            # the wrong CpuIndex slot.
SkipNxe:
    subl    $4, %esp                        # dummy slot matching the pushl %edx above
NxeDone:
164 | \r | |
    # Turn on paging and final CR0 state, then load the SMM data segments
    # from the PROCESSOR_SMM_DESCRIPTOR (edi still holds SMBASE).
    movl    %cr0, %ebx
    orl     $0x080010023, %ebx              # enable paging + WP + NE + MP + PE
    movl    %ebx, %cr0
    leal    DSC_OFFSET(%edi),%ebx           # ebx = &PROCESSOR_SMM_DESCRIPTOR
    movw    DSC_DS(%ebx),%ax
    movl    %eax, %ds
    movw    DSC_OTHERSEG(%ebx),%ax
    movl    %eax, %es
    movl    %eax, %fs
    movl    %eax, %gs
    movw    DSC_SS(%ebx),%ax
    movl    %eax, %ss

#   jmp     _SmiHandler                     # instruction is not needed — falls through
179 | \r | |
#
# C-call trampoline: invokes CpuSmmDebugEntry(CpuIndex), SmiRendezvous(CpuIndex)
# and CpuSmmDebugExit(CpuIndex), restores the XD-disable state saved earlier,
# then resumes from SMM.
# Stack on entry: (%esp) = saved MISC_ENABLE high dword (or dummy slot),
#                4(%esp) = CpuIndex placed on the stack by the stack setup code.
#
_SmiHandler:
    movl    4(%esp), %ebx                   # ebx = CpuIndex argument for the C handlers

    pushl   %ebx                            # CpuSmmDebugEntry(CpuIndex)
    movl    $ASM_PFX(CpuSmmDebugEntry), %eax
    call    *%eax                           # indirect call: absolute address works from copied template
    addl    $4, %esp

    pushl   %ebx                            # SmiRendezvous(CpuIndex) — main SMI dispatch
    movl    $ASM_PFX(SmiRendezvous), %eax
    call    *%eax
    addl    $4, %esp

    pushl   %ebx                            # CpuSmmDebugExit(CpuIndex)
    movl    $ASM_PFX(CpuSmmDebugExit), %eax
    call    *%eax
    addl    $4, %esp

    # Restore XD Disable if it was set before entering SMM
    movl    $ASM_PFX(mXdSupported), %eax
    movb    (%eax), %al
    cmpb    $0, %al
    jz      L16                             # XD not supported: nothing was saved
    popl    %edx                            # get saved MSR_IA32_MISC_ENABLE[63-32]
    testl   $BIT2, %edx                     # was XD Disable set on entry?
    jz      L16
    movl    $MSR_IA32_MISC_ENABLE, %ecx
    rdmsr
    orw     $BIT2, %dx                      # set XD Disable bit if it was set before entering into SMM
    wrmsr

L16:
    rsm                                     # resume from System Management Mode
212 | \r | |
213 | ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint\r |