/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"
#include "qemu-common.h"

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* target supports implicit self-modifying code */
#define TARGET_HAS_SMC
/* support for self-modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#define TARGET_HAS_ICE 1

#ifdef TARGET_X86_64
#define ELF_MACHINE     EM_X86_64
#else
#define ELF_MACHINE     EM_386
#endif

#define CPUState struct CPUX86State

#include "cpu-defs.h"

#include "softfloat.h"

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only: 64-bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1 = code segment, 0 = data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)

101 | #define CC_C 0x0001 | |
102 | #define CC_P 0x0004 | |
103 | #define CC_A 0x0010 | |
104 | #define CC_Z 0x0040 | |
105 | #define CC_S 0x0080 | |
106 | #define CC_O 0x0800 | |
107 | ||
108 | #define TF_SHIFT 8 | |
109 | #define IOPL_SHIFT 12 | |
110 | #define VM_SHIFT 17 | |
111 | ||
112 | #define TF_MASK 0x00000100 | |
113 | #define IF_MASK 0x00000200 | |
114 | #define DF_MASK 0x00000400 | |
115 | #define IOPL_MASK 0x00003000 | |
116 | #define NT_MASK 0x00004000 | |
117 | #define RF_MASK 0x00010000 | |
118 | #define VM_MASK 0x00020000 | |
119 | #define AC_MASK 0x00040000 | |
120 | #define VIF_MASK 0x00080000 | |
121 | #define VIP_MASK 0x00100000 | |
122 | #define ID_MASK 0x00200000 | |
123 | ||
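/* Illustrative helper (not part of the original header): the SHIFT/MASK
   pairs above are meant to be used together, e.g. to read the I/O
   privilege level out of eflags: */
static inline int eflags_iopl(target_ulong eflags)
{
    return (eflags & IOPL_MASK) >> IOPL_SHIFT;
}
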
/* hidden flags - used internally by QEMU to represent additional CPU
   state.  Only CPL, INHIBIT_IRQ, SMM and SVMI are not redundant.  We
   avoid using the IOPL_MASK, TF_MASK and VM_MASK bit positions to ease
   OR-ing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT     2
/* true if hardware interrupts must be disabled for the next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16-bit or 32-bit segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS: can be '0' only with a 32-bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64-bit code segment */
#define HF_RF_SHIFT         16 /* must be same as eflags */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */

#define HF_CPL_MASK         (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK     (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK        (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK        (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK      (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK          (1 << HF_PE_SHIFT)
#define HF_TF_MASK          (1 << HF_TF_SHIFT)
#define HF_MP_MASK          (1 << HF_MP_SHIFT)
#define HF_EM_MASK          (1 << HF_EM_SHIFT)
#define HF_TS_MASK          (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK        (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK         (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK        (1 << HF_CS64_SHIFT)
#define HF_RF_MASK          (1 << HF_RF_SHIFT)
#define HF_VM_MASK          (1 << HF_VM_SHIFT)
#define HF_SMM_MASK         (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK        (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK        (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK      (1 << HF_OSFXSR_SHIFT)

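/* Illustrative sketch (not original code): the current privilege level
   sits in the low two bits of hflags, so it can be read back with a mask
   and a shift.  The helper name is an assumption for illustration. */
static inline int hflags_cpl(uint32_t hflags)
{
    return (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
}
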
/* hflags2 */

#define HF2_GIF_SHIFT   0 /* if set, the CPU takes interrupts */
#define HF2_HIF_SHIFT   1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT   2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */

#define HF2_GIF_MASK    (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK    (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK    (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK  (1 << HF2_VINTR_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK  (1 << 0)
#define CR0_MP_MASK  (1 << 1)
#define CR0_EM_MASK  (1 << 2)
#define CR0_TS_MASK  (1 << 3)
#define CR0_ET_MASK  (1 << 4)
#define CR0_NE_MASK  (1 << 5)
#define CR0_WP_MASK  (1 << 16)
#define CR0_AM_MASK  (1 << 18)
#define CR0_PG_MASK  (1 << 31)

#define CR4_VME_MASK  (1 << 0)
#define CR4_PVI_MASK  (1 << 1)
#define CR4_TSD_MASK  (1 << 2)
#define CR4_DE_MASK   (1 << 3)
#define CR4_PSE_MASK  (1 << 4)
#define CR4_PAE_MASK  (1 << 5)
#define CR4_MCE_MASK  (1 << 6)
#define CR4_PGE_MASK  (1 << 7)
#define CR4_PCE_MASK  (1 << 8)
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK  (1 << 10)

#define DR6_BD         (1 << 13)
#define DR6_BS         (1 << 14)
#define DR6_BT         (1 << 15)
#define DR6_FIXED_1    0xffff0ff0

#define DR7_GD         (1 << 13)
#define DR7_TYPE_SHIFT 16
#define DR7_LEN_SHIFT  18
#define DR7_FIXED_1    0x00000400

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_NX_MASK       (1LL << PG_NX_BIT)

#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10

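/* Example (illustrative, not original code): a page-fault error code is
   built by OR-ing the PG_ERROR masks together.  A user-mode write that
   hit a protection violation on a present page would push:

       PG_ERROR_P_MASK | PG_ERROR_W_MASK | PG_ERROR_U_MASK
*/
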
#define MCG_CTL_P       (1ULL << 8)  /* MCG_CTL register available */
#define MCG_SER_P       (1ULL << 24) /* MCA recovery/new status bits */

#define MCE_CAP_DEF     (MCG_CTL_P | MCG_SER_P)
#define MCE_BANKS_DEF   10

#define MCG_STATUS_RIPV (1ULL << 0)  /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL << 1)  /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL << 2)  /* machine check in progress */

#define MCI_STATUS_VAL   (1ULL << 63) /* valid error */
#define MCI_STATUS_OVER  (1ULL << 62) /* previous errors lost */
#define MCI_STATUS_UC    (1ULL << 61) /* uncorrected error */
#define MCI_STATUS_EN    (1ULL << 60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL << 59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL << 58) /* addr reg. valid */
#define MCI_STATUS_PCC   (1ULL << 57) /* processor context corrupt */
#define MCI_STATUS_S     (1ULL << 56) /* signaled machine check */
#define MCI_STATUS_AR    (1ULL << 55) /* action required */

/* MISC register defines */
#define MCM_ADDR_SEGOFF  0 /* segment offset */
#define MCM_ADDR_LINEAR  1 /* linear address */
#define MCM_ADDR_PHYS    2 /* physical address */
#define MCM_ADDR_MEM     3 /* memory address */
#define MCM_ADDR_GENERIC 7 /* generic */

#define MSR_IA32_TSC                    0x10
#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1 << 8)
#define MSR_IA32_APICBASE_ENABLE        (1 << 11)
#define MSR_IA32_APICBASE_BASE          (0xfffff << 12)
#define MSR_IA32_TSCDEADLINE            0x6e0

#define MSR_MTRRcap                     0xfe
#define MSR_MTRRcap_VCNT                8
#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_MCG_CAP                     0x179
#define MSR_MCG_STATUS                  0x17a
#define MSR_MCG_CTL                     0x17b

#define MSR_IA32_PERF_STATUS            0x198

#define MSR_IA32_MISC_ENABLE            0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT    1

#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)

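/* Example (illustrative): variable-range MTRRs come in base/mask pairs,
   so for register i the pair lives at consecutive MSR indices:

       MSR_MTRRphysBase(0) == 0x200, MSR_MTRRphysMask(0) == 0x201
       MSR_MTRRphysBase(1) == 0x202, ...
*/
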
#define MSR_MTRRfix64K_00000            0x250
#define MSR_MTRRfix16K_80000            0x258
#define MSR_MTRRfix16K_A0000            0x259
#define MSR_MTRRfix4K_C0000             0x268
#define MSR_MTRRfix4K_C8000             0x269
#define MSR_MTRRfix4K_D0000             0x26a
#define MSR_MTRRfix4K_D8000             0x26b
#define MSR_MTRRfix4K_E0000             0x26c
#define MSR_MTRRfix4K_E8000             0x26d
#define MSR_MTRRfix4K_F0000             0x26e
#define MSR_MTRRfix4K_F8000             0x26f

#define MSR_PAT                         0x277

#define MSR_MTRRdefType                 0x2ff

#define MSR_MC0_CTL                     0x400
#define MSR_MC0_STATUS                  0x401
#define MSR_MC0_ADDR                    0x402
#define MSR_MC0_MISC                    0x403

#define MSR_EFER                        0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_SVME  (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

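/* Illustrative helper (an assumption, not part of the original header):
   EFER.LMA is set once long mode is actually active (LME set and paging
   enabled); the emulator mirrors this state into HF_LMA_MASK.  A typical
   test on the raw MSR value: */
static inline int efer_long_mode_active(uint64_t efer)
{
    return (efer & MSR_EFER_LMA) != 0;
}
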
#define MSR_STAR                        0xc0000081
#define MSR_LSTAR                       0xc0000082
#define MSR_CSTAR                       0xc0000083
#define MSR_FMASK                       0xc0000084
#define MSR_FSBASE                      0xc0000100
#define MSR_GSBASE                      0xc0000101
#define MSR_KERNELGSBASE                0xc0000102
#define MSR_TSC_AUX                     0xc0000103

#define MSR_VM_HSAVE_PA                 0xc0010117

/* cpuid_features bits */
#define CPUID_FP87     (1 << 0)
#define CPUID_VME      (1 << 1)
#define CPUID_DE       (1 << 2)
#define CPUID_PSE      (1 << 3)
#define CPUID_TSC      (1 << 4)
#define CPUID_MSR      (1 << 5)
#define CPUID_PAE      (1 << 6)
#define CPUID_MCE      (1 << 7)
#define CPUID_CX8      (1 << 8)
#define CPUID_APIC     (1 << 9)
#define CPUID_SEP      (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR     (1 << 12)
#define CPUID_PGE      (1 << 13)
#define CPUID_MCA      (1 << 14)
#define CPUID_CMOV     (1 << 15)
#define CPUID_PAT      (1 << 16)
#define CPUID_PSE36    (1 << 17)
#define CPUID_PN       (1 << 18)
#define CPUID_CLFLUSH  (1 << 19)
#define CPUID_DTS      (1 << 21)
#define CPUID_ACPI     (1 << 22)
#define CPUID_MMX      (1 << 23)
#define CPUID_FXSR     (1 << 24)
#define CPUID_SSE      (1 << 25)
#define CPUID_SSE2     (1 << 26)
#define CPUID_SS       (1 << 27)
#define CPUID_HT       (1 << 28)
#define CPUID_TM       (1 << 29)
#define CPUID_IA64     (1 << 30)
#define CPUID_PBE      (1 << 31)

#define CPUID_EXT_SSE3       (1 << 0)
#define CPUID_EXT_DTES64     (1 << 2)
#define CPUID_EXT_MONITOR    (1 << 3)
#define CPUID_EXT_DSCPL      (1 << 4)
#define CPUID_EXT_VMX        (1 << 5)
#define CPUID_EXT_SMX        (1 << 6)
#define CPUID_EXT_EST        (1 << 7)
#define CPUID_EXT_TM2        (1 << 8)
#define CPUID_EXT_SSSE3      (1 << 9)
#define CPUID_EXT_CID        (1 << 10)
#define CPUID_EXT_CX16       (1 << 13)
#define CPUID_EXT_XTPR       (1 << 14)
#define CPUID_EXT_PDCM       (1 << 15)
#define CPUID_EXT_DCA        (1 << 18)
#define CPUID_EXT_SSE41      (1 << 19)
#define CPUID_EXT_SSE42      (1 << 20)
#define CPUID_EXT_X2APIC     (1 << 21)
#define CPUID_EXT_MOVBE      (1 << 22)
#define CPUID_EXT_POPCNT     (1 << 23)
#define CPUID_EXT_XSAVE      (1 << 26)
#define CPUID_EXT_OSXSAVE    (1 << 27)
#define CPUID_EXT_HYPERVISOR (1 << 31)

#define CPUID_EXT2_SYSCALL   (1 << 11)
#define CPUID_EXT2_MP        (1 << 19)
#define CPUID_EXT2_NX        (1 << 20)
#define CPUID_EXT2_MMXEXT    (1 << 22)
#define CPUID_EXT2_FFXSR     (1 << 25)
#define CPUID_EXT2_PDPE1GB   (1 << 26)
#define CPUID_EXT2_RDTSCP    (1 << 27)
#define CPUID_EXT2_LM        (1 << 29)
#define CPUID_EXT2_3DNOWEXT  (1 << 30)
#define CPUID_EXT2_3DNOW     (1 << 31)

#define CPUID_EXT3_LAHF_LM       (1 << 0)
#define CPUID_EXT3_CMP_LEG       (1 << 1)
#define CPUID_EXT3_SVM           (1 << 2)
#define CPUID_EXT3_EXTAPIC       (1 << 3)
#define CPUID_EXT3_CR8LEG        (1 << 4)
#define CPUID_EXT3_ABM           (1 << 5)
#define CPUID_EXT3_SSE4A         (1 << 6)
#define CPUID_EXT3_MISALIGNSSE   (1 << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW          (1 << 9)
#define CPUID_EXT3_IBS           (1 << 10)
#define CPUID_EXT3_SKINIT        (1 << 12)

#define CPUID_SVM_NPT          (1 << 0)
#define CPUID_SVM_LBRV         (1 << 1)
#define CPUID_SVM_SVMLOCK      (1 << 2)
#define CPUID_SVM_NRIPSAVE     (1 << 3)
#define CPUID_SVM_TSCSCALE     (1 << 4)
#define CPUID_SVM_VMCBCLEAN    (1 << 5)
#define CPUID_SVM_FLUSHASID    (1 << 6)
#define CPUID_SVM_DECODEASSIST (1 << 7)
#define CPUID_SVM_PAUSEFILTER  (1 << 10)
#define CPUID_SVM_PFTHRESHOLD  (1 << 12)

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */

#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */

#define CPUID_VENDOR_VIA_1   0x746e6543 /* "Cent" */
#define CPUID_VENDOR_VIA_2   0x48727561 /* "aurH" */
#define CPUID_VENDOR_VIA_3   0x736c7561 /* "auls" */

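/* Example (illustrative): CPUID leaf 0 returns the vendor string as three
   little-endian dwords in EBX, EDX, ECX.  Stored back to back, the Intel
   constants above spell out the 12-byte string:

       0x756e6547 0x49656e69 0x6c65746e  ->  "GenuineIntel"
*/
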
#define CPUID_MWAIT_IBE (1 << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1 << 0) /* enumeration supported */

#define EXCP00_DIVZ  0
#define EXCP01_DB    1
#define EXCP02_NMI   2
#define EXCP03_INT3  3
#define EXCP04_INTO  4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX  7
#define EXCP08_DBLE  8
#define EXCP09_XERR  9
#define EXCP0A_TSS   10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF   13
#define EXCP0E_PAGE  14
#define EXCP10_COPR  16
#define EXCP11_ALGN  17
#define EXCP12_MCHK  18

#define EXCP_SYSCALL 0x100 /* only happens in user-only emulation,
                              for the syscall instruction */

/* i386-specific interrupt pending bits. */
#define CPU_INTERRUPT_SMI       CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE       CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ      CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_INIT      CPU_INTERRUPT_TGT_INT_1
#define CPU_INTERRUPT_SIPI      CPU_INTERRUPT_TGT_INT_2
#define CPU_INTERRUPT_TPR       CPU_INTERRUPT_TGT_INT_3


enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB,
};

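/* Illustrative sketch (not original code): condition codes are computed
   lazily.  An instruction records its operands and a CC_OP value instead
   of updating eflags, roughly:

       CC_SRC = src1;           first source operand
       CC_DST = src1 + src2;    result
       CC_OP  = CC_OP_ADDL;     recipe to recompute the flags

   cpu_cc_compute_all() (declared at the end of this header) then derives
   CF..OF from (CC_OP, CC_SRC, CC_DST) only when a flag is really needed. */
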
typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float32 _s[4];
    float64 _d[2];
} XMMReg;

typedef union {
    uint8_t _b[8];
    uint16_t _w[4];
    uint32_t _l[2];
    float32 _s[2];
    uint64_t q;
} MMXReg;

#ifdef HOST_WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#define MMX_S(n) _s[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#define MMX_S(n) _s[n]
#endif
#define MMX_Q(n) q

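/* Example (illustrative): the XMM_B/W/L/S/Q/D and MMX accessors above hide
   host byte order, so index 0 always names the architecturally lowest
   lane.  On a big-endian host:

       r.XMM_L(0)  expands to  r._l[3]

   while on a little-endian host it expands to r._l[0]; both address the
   low 32-bit lane of the 128-bit value. */
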
typedef union {
    floatx80 d __attribute__((aligned(16)));
    MMXReg mmx;
} FPReg;

typedef struct {
    uint64_t base;
    uint64_t mask;
} MTRRVar;

#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

#define NB_MMU_MODES 2

typedef enum TPRAccess {
    TPR_ACCESS_READ,
    TPR_ACCESS_WRITE,
} TPRAccess;

typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    int32_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    uint16_t fpus;
    uint16_t fpuc;
    uint8_t fptags[8];  /* 0 = valid, 1 = empty */
    FPReg fpregs[8];
    /* KVM-only so far */
    uint16_t fpop;
    uint64_t fpip;
    uint64_t fpdp;

    /* emulator internal variables */
    float_status fp_status;
    floatx80 ft0;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;
    MMXReg mmx_t0;
    target_ulong cc_tmp; /* temporary for rcr/rcl */

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t efer;
    uint64_t star;

    uint64_t vm_hsave;
    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif
    uint64_t system_time_msr;
    uint64_t wall_clock_msr;
    uint64_t async_pf_en_msr;

    uint64_t tsc;
    uint64_t tsc_deadline;

    uint64_t mcg_status;
    uint64_t msr_ia32_misc_enable;

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers */
    union {
        CPUBreakpoint *cpu_breakpoint[4];
        CPUWatchpoint *cpu_watchpoint[4];
    }; /* break/watchpoints for dr[0..3] */
    uint32_t smbase;
    int old_exception;  /* exception in flight */

    /* KVM state, automatically cleared on reset */
    uint8_t nmi_injected;
    uint8_t nmi_pending;

    CPU_COMMON

    uint64_t pat;

    /* processor features (e.g. for the CPUID insn) */
    uint32_t cpuid_level;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    uint32_t cpuid_features;
    uint32_t cpuid_ext_features;
    uint32_t cpuid_xlevel;
    uint32_t cpuid_model[12];
    uint32_t cpuid_ext2_features;
    uint32_t cpuid_ext3_features;
    uint32_t cpuid_apic_id;
    int cpuid_vendor_override;
    /* Store the results of Centaur's CPUID instructions */
    uint32_t cpuid_xlevel2;
    uint32_t cpuid_ext4_features;

    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[8];

    /* For KVM */
    uint32_t mp_state;
    int32_t exception_injected;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t has_error_code;
    uint32_t sipi_vector;
    uint32_t cpuid_kvm_features;
    uint32_t cpuid_svm_features;
    bool tsc_valid;
    int tsc_khz;
    void *kvm_xsave_buf;

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct DeviceState *apic_state;

    uint64_t mcg_cap;
    uint64_t mcg_ctl;
    uint64_t mce_banks[MCE_BANKS_DEF * 4];

    uint64_t tsc_aux;

    /* vmstate */
    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;

    uint64_t xstate_bv;
    XMMReg ymmh_regs[CPU_NB_REGS];

    uint64_t xcr0;

    TPRAccess tpr_access_type;
} CPUX86State;

CPUX86State *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf, const char *optarg);
void x86_cpudef_setup(void);
int cpu_x86_support_mca_broadcast(CPUState *env);

int cpu_get_pic_interrupt(CPUX86State *s);
/* MS-DOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test.  The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster.  A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}

static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env,
                                               int sipi_vector)
{
    env->eip = 0;
    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
                           sipi_vector << 12,
                           env->segs[R_CS].limit,
                           env->segs[R_CS].flags);
    env->halted = 0;
}

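/* Example (illustrative): a SIPI with vector 0x12 makes the AP start at
   CS = 0x1200 with base 0x12000 and EIP = 0, i.e. physical address
   0x12000; that is exactly (vector << 8) for the selector and
   (vector << 12) for the base used above. */
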
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags);

/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}

/* op_helper.c */
/* used for debugging or CPU save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);

/* cpu-exec.c */
/* the following helpers are only usable in user-mode simulation, as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions.  Non-zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/* cpuid.c */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx);
int cpu_x86_register(CPUX86State *env, const char *cpu_model);
void cpu_clear_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

/* helper.c */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx);
#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
void cpu_x86_set_a20(CPUX86State *env, int a20_state);

static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
{
    return (dr7 >> (index * 2)) & 3;
}

static inline int hw_breakpoint_type(unsigned long dr7, int index)
{
    return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
}

static inline int hw_breakpoint_len(unsigned long dr7, int index)
{
    int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
    return (len == 2) ? 8 : len + 1;
}

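/* Usage sketch (illustrative, not original code): decoding breakpoint i
   from DR7 with the helpers above.  Type 0 is an execution breakpoint;
   a LEN field of 2 maps to 8 bytes (long mode only), hence the special
   case in hw_breakpoint_len():

       if (hw_breakpoint_enabled(env->dr[7], i) &&
           hw_breakpoint_type(env->dr[7], i) == 0) {
           ... insert an execution breakpoint at env->dr[i] ...
       }
*/
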
void hw_breakpoint_insert(CPUX86State *env, int index);
void hw_breakpoint_remove(CPUX86State *env, int index);
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);

/* will be removed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);

/* hw/pc.c */
void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);

/* used for debugging */
#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump QEMU flag cache */

#define TARGET_PAGE_BITS 12

#ifdef TARGET_X86_64
#define TARGET_PHYS_ADDR_SPACE_BITS 52
/* ??? This is really 48 bits, sign-extended, but the only thing
   accessible to userland with bit 48 set is the VSYSCALL, and that
   is handled via other mechanisms. */
#define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list_id x86_cpu_list
#define cpudef_setup x86_cpudef_setup

#define CPU_SAVE_VERSION 12

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index(CPUState *env)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}

#undef EAX
#define EAX (env->regs[R_EAX])
#undef ECX
#define ECX (env->regs[R_ECX])
#undef EDX
#define EDX (env->regs[R_EDX])
#undef EBX
#define EBX (env->regs[R_EBX])
#undef ESP
#define ESP (env->regs[R_ESP])
#undef EBP
#define EBP (env->regs[R_EBP])
#undef ESI
#define ESI (env->regs[R_ESI])
#undef EDI
#define EDI (env->regs[R_EDI])
#undef EIP
#define EIP (env->eip)
#define DF  (env->df)

#define CC_SRC (env->cc_src)
#define CC_DST (env->cc_dst)
#define CC_OP  (env->cc_op)

/* float macros */
#define FT0   (env->ft0)
#define ST0   (env->fpregs[env->fpstt].d)
#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
#define ST1   ST(1)

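/* Example (illustrative): the x87 stack is modeled as fpregs[] plus the
   top-of-stack index fpstt, so ST(n) rotates around an 8-entry ring:

       ST0   == env->fpregs[env->fpstt].d
       ST(1) == env->fpregs[(env->fpstt + 1) & 7].d
*/
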
/* translate.c */
void optimize_flags_init(void);

#if defined(CONFIG_USER_ONLY)
static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
{
    if (newsp)
        env->regs[R_ESP] = newsp;
    env->regs[R_EAX] = 0;
}
#endif

#include "cpu-all.h"
#include "svm.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/apic.h"
#endif

static inline bool cpu_has_work(CPUState *env)
{
    return ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) ||
           (env->interrupt_request & (CPU_INTERRUPT_NMI |
                                      CPU_INTERRUPT_INIT |
                                      CPU_INTERRUPT_SIPI |
                                      CPU_INTERRUPT_MCE));
}

#include "exec-all.h"

static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
{
    env->eip = tb->pc - tb->cs_base;
}

static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *cs_base = env->segs[R_CS].base;
    *pc = *cs_base + env->eip;
    *flags = env->hflags |
        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
}

void do_cpu_init(CPUState *env);
void do_cpu_sipi(CPUState *env);

#define MCE_INJECT_BROADCAST    1
#define MCE_INJECT_UNCOND_AO    2

void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags);

/* op_helper.c */
void do_interrupt(CPUState *env);
void do_interrupt_x86_hardirq(CPUState *env, int intno, int is_hw);
void QEMU_NORETURN raise_exception_env(int exception_index, CPUState *nenv);
void QEMU_NORETURN raise_exception_err_env(CPUState *nenv, int exception_index,
                                           int error_code);

void do_smm_enter(CPUState *env1);

void svm_check_intercept(CPUState *env1, uint32_t type);

uint32_t cpu_cc_compute_all(CPUState *env1, int op);

void cpu_report_tpr_access(CPUState *env, TPRAccess access);

#endif /* CPU_I386_H */