/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef I386_CPU_H
#define I386_CPU_H
23 #include "sysemu/tcg.h"
25 #include "hyperv-proto.h"
26 #include "exec/cpu-defs.h"
28 /* The x86 has a strong memory model with some store-after-load re-ordering */
29 #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
31 /* Maximum instruction code size */
32 #define TARGET_MAX_INSN_SIZE 16
34 /* support for self modifying code even if the modified instruction is
35 close to the modifying instruction */
36 #define TARGET_HAS_PRECISE_SMC
39 #define I386_ELF_MACHINE EM_X86_64
40 #define ELF_MACHINE_UNAME "x86_64"
42 #define I386_ELF_MACHINE EM_386
43 #define ELF_MACHINE_UNAME "i686"
/* segment descriptor fields */
#define DESC_G_SHIFT    23
#define DESC_G_MASK     (1 << DESC_G_SHIFT)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_SHIFT  20
#define DESC_AVL_MASK   (1 << DESC_AVL_SHIFT)
#define DESC_P_SHIFT    15
#define DESC_P_MASK     (1 << DESC_P_SHIFT)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
#define DESC_S_SHIFT    12
#define DESC_S_MASK     (1 << DESC_S_SHIFT)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
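/*
 * Illustrative sketch only (not part of QEMU's API): the DESC_* shifts and
 * masks above decode the attribute word of a cached segment descriptor,
 * e.g. its descriptor privilege level (DPL):
 */
static inline unsigned int example_desc_dpl(uint32_t flags)
{
    /* the DPL is the two-bit field at DESC_DPL_SHIFT */
    return (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
}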
#define IOPL_SHIFT 12

#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define DF_MASK   0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define RF_MASK   0x00010000
#define VM_MASK   0x00020000
#define AC_MASK   0x00040000
#define VIF_MASK  0x00080000
#define VIP_MASK  0x00100000
#define ID_MASK   0x00200000
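/*
 * Illustrative sketch only (not part of QEMU's API): the eflags masks are
 * plain bit tests, e.g. extracting the two-bit I/O privilege level:
 */
static inline unsigned int example_eflags_iopl(uint32_t eflags)
{
    return (eflags & IOPL_MASK) >> IOPL_SHIFT;
}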
/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the INHIBIT_IRQ, SMM and SVME bits are not redundant. We
   avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
   positions to ease ORing with eflags. */
#define HF_CPL_SHIFT         0
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 bit code/stack segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT         16 /* must be same as eflags */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_AC_SHIFT         18 /* must be same as eflags */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
#define HF_GUEST_SHIFT      21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT       23 /* CR4.SMAP */
#define HF_IOBPT_SHIFT      24 /* an io breakpoint enabled */
#define HF_MPX_EN_SHIFT     25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
#define HF_MPX_IU_SHIFT     26 /* BND registers in-use */

#define HF_CPL_MASK         (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK        (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK        (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK      (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK          (1 << HF_PE_SHIFT)
#define HF_TF_MASK          (1 << HF_TF_SHIFT)
#define HF_MP_MASK          (1 << HF_MP_SHIFT)
#define HF_EM_MASK          (1 << HF_EM_SHIFT)
#define HF_TS_MASK          (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK        (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK         (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK        (1 << HF_CS64_SHIFT)
#define HF_RF_MASK          (1 << HF_RF_SHIFT)
#define HF_VM_MASK          (1 << HF_VM_SHIFT)
#define HF_AC_MASK          (1 << HF_AC_SHIFT)
#define HF_SMM_MASK         (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK        (1 << HF_SVME_SHIFT)
#define HF_GUEST_MASK       (1 << HF_GUEST_SHIFT)
#define HF_OSFXSR_MASK      (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK        (1 << HF_SMAP_SHIFT)
#define HF_IOBPT_MASK       (1 << HF_IOBPT_SHIFT)
#define HF_MPX_EN_MASK      (1 << HF_MPX_EN_SHIFT)
#define HF_MPX_IU_MASK      (1 << HF_MPX_IU_SHIFT)
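/*
 * Illustrative sketch only (not part of QEMU's API): the current privilege
 * level lives in the low two bits of hflags and is recovered with:
 */
static inline unsigned int example_hflags_cpl(uint32_t hflags)
{
    return (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
}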
#define HF2_GIF_SHIFT            0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT            1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT            2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT          3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
#define HF2_MPX_PR_SHIFT         5 /* BNDCFGx.BNDPRESERVE */
#define HF2_NPT_SHIFT            6 /* Nested Paging enabled */

#define HF2_GIF_MASK            (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK            (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK            (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK          (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
#define HF2_MPX_PR_MASK         (1 << HF2_MPX_PR_SHIFT)
#define HF2_NPT_MASK            (1 << HF2_NPT_SHIFT)
#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK  (1U << 0)
#define CR0_MP_MASK  (1U << 1)
#define CR0_EM_MASK  (1U << 2)
#define CR0_TS_MASK  (1U << 3)
#define CR0_ET_MASK  (1U << 4)
#define CR0_NE_MASK  (1U << 5)
#define CR0_WP_MASK  (1U << 16)
#define CR0_AM_MASK  (1U << 18)
#define CR0_PG_MASK  (1U << 31)

#define CR4_VME_MASK        (1U << 0)
#define CR4_PVI_MASK        (1U << 1)
#define CR4_TSD_MASK        (1U << 2)
#define CR4_DE_MASK         (1U << 3)
#define CR4_PSE_MASK        (1U << 4)
#define CR4_PAE_MASK        (1U << 5)
#define CR4_MCE_MASK        (1U << 6)
#define CR4_PGE_MASK        (1U << 7)
#define CR4_PCE_MASK        (1U << 8)
#define CR4_OSFXSR_SHIFT    9
#define CR4_OSFXSR_MASK     (1U << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1U << 10)
#define CR4_LA57_MASK       (1U << 12)
#define CR4_VMXE_MASK       (1U << 13)
#define CR4_SMXE_MASK       (1U << 14)
#define CR4_FSGSBASE_MASK   (1U << 16)
#define CR4_PCIDE_MASK      (1U << 17)
#define CR4_OSXSAVE_MASK    (1U << 18)
#define CR4_SMEP_MASK       (1U << 20)
#define CR4_SMAP_MASK       (1U << 21)
#define CR4_PKE_MASK        (1U << 22)

#define DR6_BD      (1 << 13)
#define DR6_BS      (1 << 14)
#define DR6_BT      (1 << 15)
#define DR6_FIXED_1 0xffff0ff0

#define DR7_GD             (1 << 13)
#define DR7_TYPE_SHIFT     16
#define DR7_LEN_SHIFT      18
#define DR7_FIXED_1        0x00000400
#define DR7_GLOBAL_BP_MASK 0xaa
#define DR7_LOCAL_BP_MASK  0x55

#define DR7_TYPE_BP_INST 0x0
#define DR7_TYPE_DATA_WR 0x1
#define DR7_TYPE_IO_RW   0x2
#define DR7_TYPE_DATA_RW 0x3
#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_PSE_PAT_BIT  12
#define PG_PKRU_BIT     59
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_PSE_PAT_MASK  (1 << PG_PSE_PAT_BIT)
#define PG_ADDRESS_MASK  0x000ffffffffff000LL
#define PG_HI_RSVD_MASK  (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
#define PG_HI_USER_MASK  0x7ff0000000000000LL
#define PG_PKRU_MASK     (15ULL << PG_PKRU_BIT)
#define PG_NX_MASK       (1ULL << PG_NX_BIT)
#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10
#define PG_ERROR_PK_MASK   0x20
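/*
 * Illustrative sketch only (not part of QEMU's API): page-fault error codes
 * are combinations of the PG_ERROR_* masks, e.g. a user-mode write fault
 * carries both the U and W bits:
 */
static inline bool example_pf_is_user_write(uint32_t error_code)
{
    uint32_t uw = PG_ERROR_U_MASK | PG_ERROR_W_MASK;
    return (error_code & uw) == uw;
}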
#define MCG_CTL_P  (1ULL<<8)  /* MCG_CAP register available */
#define MCG_SER_P  (1ULL<<24) /* MCA recovery/new status bits */
#define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */

#define MCE_CAP_DEF  (MCG_CTL_P|MCG_SER_P)
#define MCE_BANKS_DEF 10

#define MCG_CAP_BANKS_MASK 0xff

#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
#define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */

#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */

#define MCI_STATUS_VAL   (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER  (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC    (1ULL<<61) /* uncorrected error */
#define MCI_STATUS_EN    (1ULL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC   (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S     (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR    (1ULL<<55) /* Action required */

/* MISC register defines */
#define MCM_ADDR_SEGOFF  0 /* segment offset */
#define MCM_ADDR_LINEAR  1 /* linear address */
#define MCM_ADDR_PHYS    2 /* physical address */
#define MCM_ADDR_MEM     3 /* memory address */
#define MCM_ADDR_GENERIC 7 /* generic */
#define MSR_IA32_TSC                    0x10
#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1<<8)
#define MSR_IA32_APICBASE_ENABLE        (1<<11)
#define MSR_IA32_APICBASE_EXTD          (1 << 10)
#define MSR_IA32_APICBASE_BASE          (0xfffffU<<12)
#define MSR_IA32_FEATURE_CONTROL        0x0000003a
#define MSR_TSC_ADJUST                  0x0000003b
#define MSR_IA32_SPEC_CTRL              0x48
#define MSR_VIRT_SSBD                   0xc001011f
#define MSR_IA32_PRED_CMD               0x49
#define MSR_IA32_ARCH_CAPABILITIES      0x10a
#define MSR_IA32_TSCDEADLINE            0x6e0

#define FEATURE_CONTROL_LOCKED                    (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define FEATURE_CONTROL_LMCE                      (1<<20)

#define MSR_P6_PERFCTR0                 0xc1

#define MSR_IA32_SMBASE                 0x9e
#define MSR_SMI_COUNT                   0x34
#define MSR_MTRRcap                     0xfe
#define MSR_MTRRcap_VCNT                8
#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_MCG_CAP                     0x179
#define MSR_MCG_STATUS                  0x17a
#define MSR_MCG_CTL                     0x17b
#define MSR_MCG_EXT_CTL                 0x4d0

#define MSR_P6_EVNTSEL0                 0x186

#define MSR_IA32_PERF_STATUS            0x198

#define MSR_IA32_MISC_ENABLE            0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT    1
#define MSR_IA32_MISC_ENABLE_MWAIT      (1ULL << 18)

#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)

#define MSR_MTRRphysIndex(addr)         ((((addr) & ~1u) - 0x200) / 2)
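/*
 * Illustrative only: each variable-range MTRR uses a base/mask MSR pair,
 * so register n maps to MSRs 0x200 + 2n and 0x200 + 2n + 1, and
 * MSR_MTRRphysIndex() recovers n from either MSR number, e.g.:
 *
 *   MSR_MTRRphysBase(2)      -> 0x204
 *   MSR_MTRRphysMask(2)      -> 0x205
 *   MSR_MTRRphysIndex(0x205) -> 2
 */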
#define MSR_MTRRfix64K_00000            0x250
#define MSR_MTRRfix16K_80000            0x258
#define MSR_MTRRfix16K_A0000            0x259
#define MSR_MTRRfix4K_C0000             0x268
#define MSR_MTRRfix4K_C8000             0x269
#define MSR_MTRRfix4K_D0000             0x26a
#define MSR_MTRRfix4K_D8000             0x26b
#define MSR_MTRRfix4K_E0000             0x26c
#define MSR_MTRRfix4K_E8000             0x26d
#define MSR_MTRRfix4K_F0000             0x26e
#define MSR_MTRRfix4K_F8000             0x26f

#define MSR_PAT                         0x277

#define MSR_MTRRdefType                 0x2ff

#define MSR_CORE_PERF_FIXED_CTR0        0x309
#define MSR_CORE_PERF_FIXED_CTR1        0x30a
#define MSR_CORE_PERF_FIXED_CTR2        0x30b
#define MSR_CORE_PERF_FIXED_CTR_CTRL    0x38d
#define MSR_CORE_PERF_GLOBAL_STATUS     0x38e
#define MSR_CORE_PERF_GLOBAL_CTRL       0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL   0x390

#define MSR_MC0_CTL                     0x400
#define MSR_MC0_STATUS                  0x401
#define MSR_MC0_ADDR                    0x402
#define MSR_MC0_MISC                    0x403

#define MSR_IA32_RTIT_OUTPUT_BASE       0x560
#define MSR_IA32_RTIT_OUTPUT_MASK       0x561
#define MSR_IA32_RTIT_CTL               0x570
#define MSR_IA32_RTIT_STATUS            0x571
#define MSR_IA32_RTIT_CR3_MATCH         0x572
#define MSR_IA32_RTIT_ADDR0_A           0x580
#define MSR_IA32_RTIT_ADDR0_B           0x581
#define MSR_IA32_RTIT_ADDR1_A           0x582
#define MSR_IA32_RTIT_ADDR1_B           0x583
#define MSR_IA32_RTIT_ADDR2_A           0x584
#define MSR_IA32_RTIT_ADDR2_B           0x585
#define MSR_IA32_RTIT_ADDR3_A           0x586
#define MSR_IA32_RTIT_ADDR3_B           0x587
#define MAX_RTIT_ADDRS                  8

#define MSR_EFER                        0xc0000080

#define MSR_EFER_SCE                    (1 << 0)
#define MSR_EFER_LME                    (1 << 8)
#define MSR_EFER_LMA                    (1 << 10)
#define MSR_EFER_NXE                    (1 << 11)
#define MSR_EFER_SVME                   (1 << 12)
#define MSR_EFER_FFXSR                  (1 << 14)

#define MSR_STAR                        0xc0000081
#define MSR_LSTAR                       0xc0000082
#define MSR_CSTAR                       0xc0000083
#define MSR_FMASK                       0xc0000084
#define MSR_FSBASE                      0xc0000100
#define MSR_GSBASE                      0xc0000101
#define MSR_KERNELGSBASE                0xc0000102
#define MSR_TSC_AUX                     0xc0000103

#define MSR_VM_HSAVE_PA                 0xc0010117

#define MSR_IA32_BNDCFGS                0x00000d90
#define MSR_IA32_XSS                    0x00000da0

#define XSTATE_FP_BIT                   0
#define XSTATE_SSE_BIT                  1
#define XSTATE_YMM_BIT                  2
#define XSTATE_BNDREGS_BIT              3
#define XSTATE_BNDCSR_BIT               4
#define XSTATE_OPMASK_BIT               5
#define XSTATE_ZMM_Hi256_BIT            6
#define XSTATE_Hi16_ZMM_BIT             7
#define XSTATE_PKRU_BIT                 9

#define XSTATE_FP_MASK                  (1ULL << XSTATE_FP_BIT)
#define XSTATE_SSE_MASK                 (1ULL << XSTATE_SSE_BIT)
#define XSTATE_YMM_MASK                 (1ULL << XSTATE_YMM_BIT)
#define XSTATE_BNDREGS_MASK             (1ULL << XSTATE_BNDREGS_BIT)
#define XSTATE_BNDCSR_MASK              (1ULL << XSTATE_BNDCSR_BIT)
#define XSTATE_OPMASK_MASK              (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK           (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK            (1ULL << XSTATE_Hi16_ZMM_BIT)
#define XSTATE_PKRU_MASK                (1ULL << XSTATE_PKRU_BIT)
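/*
 * Illustrative only: an XCR0 value is the OR of the XSTATE_*_MASK bits for
 * the enabled state components, e.g. x87 + SSE + AVX:
 *   XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK == 0x7
 */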
/* CPUID feature words */
typedef enum FeatureWord {
    FEAT_1_EDX,         /* CPUID[1].EDX */
    FEAT_1_ECX,         /* CPUID[1].ECX */
    FEAT_7_0_EBX,       /* CPUID[EAX=7,ECX=0].EBX */
    FEAT_7_0_ECX,       /* CPUID[EAX=7,ECX=0].ECX */
    FEAT_7_0_EDX,       /* CPUID[EAX=7,ECX=0].EDX */
    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
    FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
    FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
    FEAT_KVM,           /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
    FEAT_KVM_HINTS,     /* CPUID[4000_0001].EDX */
    FEAT_HYPERV_EAX,    /* CPUID[4000_0003].EAX */
    FEAT_HYPERV_EBX,    /* CPUID[4000_0003].EBX */
    FEAT_HYPERV_EDX,    /* CPUID[4000_0003].EDX */
    FEAT_HV_RECOMM_EAX, /* CPUID[4000_0004].EAX */
    FEAT_HV_NESTED_EAX, /* CPUID[4000_000A].EAX */
    FEAT_SVM,           /* CPUID[8000_000A].EDX */
    FEAT_XSAVE,         /* CPUID[EAX=0xd,ECX=1].EAX */
    FEAT_6_EAX,         /* CPUID[6].EAX */
    FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
    FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
    FEAT_ARCH_CAPABILITIES,
    FEATURE_WORDS,
} FeatureWord;

typedef uint32_t FeatureWordArray[FEATURE_WORDS];
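/*
 * Illustrative only (hypothetical usage, not QEMU code): a FeatureWordArray
 * is indexed by FeatureWord, one 32-bit CPUID register per entry:
 *
 *   FeatureWordArray features = { 0 };
 *   features[FEAT_1_EDX] |= CPUID_SSE2;               // bit defined below
 *   bool has_sse2 = features[FEAT_1_EDX] & CPUID_SSE2;
 */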
/* cpuid_features bits */
#define CPUID_FP87    (1U << 0)
#define CPUID_VME     (1U << 1)
#define CPUID_DE      (1U << 2)
#define CPUID_PSE     (1U << 3)
#define CPUID_TSC     (1U << 4)
#define CPUID_MSR     (1U << 5)
#define CPUID_PAE     (1U << 6)
#define CPUID_MCE     (1U << 7)
#define CPUID_CX8     (1U << 8)
#define CPUID_APIC    (1U << 9)
#define CPUID_SEP     (1U << 11) /* sysenter/sysexit */
#define CPUID_MTRR    (1U << 12)
#define CPUID_PGE     (1U << 13)
#define CPUID_MCA     (1U << 14)
#define CPUID_CMOV    (1U << 15)
#define CPUID_PAT     (1U << 16)
#define CPUID_PSE36   (1U << 17)
#define CPUID_PN      (1U << 18)
#define CPUID_CLFLUSH (1U << 19)
#define CPUID_DTS     (1U << 21)
#define CPUID_ACPI    (1U << 22)
#define CPUID_MMX     (1U << 23)
#define CPUID_FXSR    (1U << 24)
#define CPUID_SSE     (1U << 25)
#define CPUID_SSE2    (1U << 26)
#define CPUID_SS      (1U << 27)
#define CPUID_HT      (1U << 28)
#define CPUID_TM      (1U << 29)
#define CPUID_IA64    (1U << 30)
#define CPUID_PBE     (1U << 31)
#define CPUID_EXT_SSE3               (1U << 0)
#define CPUID_EXT_PCLMULQDQ          (1U << 1)
#define CPUID_EXT_DTES64             (1U << 2)
#define CPUID_EXT_MONITOR            (1U << 3)
#define CPUID_EXT_DSCPL              (1U << 4)
#define CPUID_EXT_VMX                (1U << 5)
#define CPUID_EXT_SMX                (1U << 6)
#define CPUID_EXT_EST                (1U << 7)
#define CPUID_EXT_TM2                (1U << 8)
#define CPUID_EXT_SSSE3              (1U << 9)
#define CPUID_EXT_CID                (1U << 10)
#define CPUID_EXT_FMA                (1U << 12)
#define CPUID_EXT_CX16               (1U << 13)
#define CPUID_EXT_XTPR               (1U << 14)
#define CPUID_EXT_PDCM               (1U << 15)
#define CPUID_EXT_PCID               (1U << 17)
#define CPUID_EXT_DCA                (1U << 18)
#define CPUID_EXT_SSE41              (1U << 19)
#define CPUID_EXT_SSE42              (1U << 20)
#define CPUID_EXT_X2APIC             (1U << 21)
#define CPUID_EXT_MOVBE              (1U << 22)
#define CPUID_EXT_POPCNT             (1U << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
#define CPUID_EXT_AES                (1U << 25)
#define CPUID_EXT_XSAVE              (1U << 26)
#define CPUID_EXT_OSXSAVE            (1U << 27)
#define CPUID_EXT_AVX                (1U << 28)
#define CPUID_EXT_F16C               (1U << 29)
#define CPUID_EXT_RDRAND             (1U << 30)
#define CPUID_EXT_HYPERVISOR         (1U << 31)
#define CPUID_EXT2_FPU      (1U << 0)
#define CPUID_EXT2_VME      (1U << 1)
#define CPUID_EXT2_DE       (1U << 2)
#define CPUID_EXT2_PSE      (1U << 3)
#define CPUID_EXT2_TSC      (1U << 4)
#define CPUID_EXT2_MSR      (1U << 5)
#define CPUID_EXT2_PAE      (1U << 6)
#define CPUID_EXT2_MCE      (1U << 7)
#define CPUID_EXT2_CX8      (1U << 8)
#define CPUID_EXT2_APIC     (1U << 9)
#define CPUID_EXT2_SYSCALL  (1U << 11)
#define CPUID_EXT2_MTRR     (1U << 12)
#define CPUID_EXT2_PGE      (1U << 13)
#define CPUID_EXT2_MCA      (1U << 14)
#define CPUID_EXT2_CMOV     (1U << 15)
#define CPUID_EXT2_PAT      (1U << 16)
#define CPUID_EXT2_PSE36    (1U << 17)
#define CPUID_EXT2_MP       (1U << 19)
#define CPUID_EXT2_NX       (1U << 20)
#define CPUID_EXT2_MMXEXT   (1U << 22)
#define CPUID_EXT2_MMX      (1U << 23)
#define CPUID_EXT2_FXSR     (1U << 24)
#define CPUID_EXT2_FFXSR    (1U << 25)
#define CPUID_EXT2_PDPE1GB  (1U << 26)
#define CPUID_EXT2_RDTSCP   (1U << 27)
#define CPUID_EXT2_LM       (1U << 29)
#define CPUID_EXT2_3DNOWEXT (1U << 30)
#define CPUID_EXT2_3DNOW    (1U << 31)
/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
                                CPUID_EXT2_DE | CPUID_EXT2_PSE | \
                                CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
                                CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
                                CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
                                CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
                                CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
                                CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
                                CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
#define CPUID_EXT3_LAHF_LM       (1U << 0)
#define CPUID_EXT3_CMP_LEG       (1U << 1)
#define CPUID_EXT3_SVM           (1U << 2)
#define CPUID_EXT3_EXTAPIC       (1U << 3)
#define CPUID_EXT3_CR8LEG        (1U << 4)
#define CPUID_EXT3_ABM           (1U << 5)
#define CPUID_EXT3_SSE4A         (1U << 6)
#define CPUID_EXT3_MISALIGNSSE   (1U << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
#define CPUID_EXT3_OSVW          (1U << 9)
#define CPUID_EXT3_IBS           (1U << 10)
#define CPUID_EXT3_XOP           (1U << 11)
#define CPUID_EXT3_SKINIT        (1U << 12)
#define CPUID_EXT3_WDT           (1U << 13)
#define CPUID_EXT3_LWP           (1U << 15)
#define CPUID_EXT3_FMA4          (1U << 16)
#define CPUID_EXT3_TCE           (1U << 17)
#define CPUID_EXT3_NODEID        (1U << 19)
#define CPUID_EXT3_TBM           (1U << 21)
#define CPUID_EXT3_TOPOEXT       (1U << 22)
#define CPUID_EXT3_PERFCORE      (1U << 23)
#define CPUID_EXT3_PERFNB        (1U << 24)

#define CPUID_SVM_NPT          (1U << 0)
#define CPUID_SVM_LBRV         (1U << 1)
#define CPUID_SVM_SVMLOCK      (1U << 2)
#define CPUID_SVM_NRIPSAVE     (1U << 3)
#define CPUID_SVM_TSCSCALE     (1U << 4)
#define CPUID_SVM_VMCBCLEAN    (1U << 5)
#define CPUID_SVM_FLUSHASID    (1U << 6)
#define CPUID_SVM_DECODEASSIST (1U << 7)
#define CPUID_SVM_PAUSEFILTER  (1U << 10)
#define CPUID_SVM_PFTHRESHOLD  (1U << 12)
#define CPUID_7_0_EBX_FSGSBASE   (1U << 0)
#define CPUID_7_0_EBX_BMI1       (1U << 3)
#define CPUID_7_0_EBX_HLE        (1U << 4)
#define CPUID_7_0_EBX_AVX2       (1U << 5)
#define CPUID_7_0_EBX_SMEP       (1U << 7)
#define CPUID_7_0_EBX_BMI2       (1U << 8)
#define CPUID_7_0_EBX_ERMS       (1U << 9)
#define CPUID_7_0_EBX_INVPCID    (1U << 10)
#define CPUID_7_0_EBX_RTM        (1U << 11)
#define CPUID_7_0_EBX_MPX        (1U << 14)
#define CPUID_7_0_EBX_AVX512F    (1U << 16) /* AVX-512 Foundation */
#define CPUID_7_0_EBX_AVX512DQ   (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */
#define CPUID_7_0_EBX_RDSEED     (1U << 18)
#define CPUID_7_0_EBX_ADX        (1U << 19)
#define CPUID_7_0_EBX_SMAP       (1U << 20)
#define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */
#define CPUID_7_0_EBX_PCOMMIT    (1U << 22) /* Persistent Commit */
#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
#define CPUID_7_0_EBX_CLWB       (1U << 24) /* Cache Line Write Back */
#define CPUID_7_0_EBX_INTEL_PT   (1U << 25) /* Intel Processor Trace */
#define CPUID_7_0_EBX_AVX512PF   (1U << 26) /* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512ER   (1U << 27) /* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512CD   (1U << 28) /* AVX-512 Conflict Detection */
#define CPUID_7_0_EBX_SHA_NI     (1U << 29) /* SHA1/SHA256 Instruction Extensions */
#define CPUID_7_0_EBX_AVX512BW   (1U << 30) /* AVX-512 Byte and Word Instructions */
#define CPUID_7_0_EBX_AVX512VL   (1U << 31) /* AVX-512 Vector Length Extensions */

#define CPUID_7_0_ECX_AVX512BMI        (1U << 1)
#define CPUID_7_0_ECX_VBMI             (1U << 1)  /* AVX-512 Vector Byte Manipulation Instrs */
#define CPUID_7_0_ECX_UMIP             (1U << 2)
#define CPUID_7_0_ECX_PKU              (1U << 3)
#define CPUID_7_0_ECX_OSPKE            (1U << 4)
#define CPUID_7_0_ECX_VBMI2            (1U << 6)  /* Additional VBMI Instrs */
#define CPUID_7_0_ECX_GFNI             (1U << 8)
#define CPUID_7_0_ECX_VAES             (1U << 9)
#define CPUID_7_0_ECX_VPCLMULQDQ       (1U << 10)
#define CPUID_7_0_ECX_AVX512VNNI       (1U << 11)
#define CPUID_7_0_ECX_AVX512BITALG     (1U << 12)
#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) /* POPCNT for vectors of DW/QW */
#define CPUID_7_0_ECX_LA57             (1U << 16)
#define CPUID_7_0_ECX_RDPID            (1U << 22)
#define CPUID_7_0_ECX_CLDEMOTE         (1U << 25) /* CLDEMOTE Instruction */
#define CPUID_7_0_ECX_MOVDIRI          (1U << 27) /* MOVDIRI Instruction */
#define CPUID_7_0_ECX_MOVDIR64B        (1U << 28) /* MOVDIR64B Instruction */

#define CPUID_7_0_EDX_AVX512_4VNNIW    (1U << 2)  /* AVX512 Neural Network Instructions */
#define CPUID_7_0_EDX_AVX512_4FMAPS    (1U << 3)  /* AVX512 Multiply Accumulation Single Precision */
#define CPUID_7_0_EDX_SPEC_CTRL        (1U << 26) /* Speculation Control */
#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /* Arch Capabilities */
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD   (1U << 31) /* Speculative Store Bypass Disable */
#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9)  /* Write back and
                                                   do not invalidate cache */
#define CPUID_8000_0008_EBX_IBPB     (1U << 12) /* Indirect Branch Prediction Barrier */
#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC   (1U << 1)
#define CPUID_XSAVE_XGETBV1  (1U << 2)
#define CPUID_XSAVE_XSAVES   (1U << 3)

#define CPUID_6_EAX_ARAT     (1U << 2)

/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC     (1U << 8)

#define CPUID_VENDOR_SZ      12

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
#define CPUID_VENDOR_INTEL   "GenuineIntel"

#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */
#define CPUID_VENDOR_AMD     "AuthenticAMD"

#define CPUID_VENDOR_VIA     "CentaurHauls"

#define CPUID_VENDOR_HYGON   "HygonGenuine"

#define CPUID_MWAIT_IBE      (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX      (1U << 0) /* enumeration supported */

/* CPUID[0xB].ECX level types */
#define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8)
#define CPUID_TOPOLOGY_LEVEL_SMT     (1U << 8)
#define CPUID_TOPOLOGY_LEVEL_CORE    (2U << 8)
/* MSR Feature Bits */
#define MSR_ARCH_CAP_RDCL_NO            (1U << 0)
#define MSR_ARCH_CAP_IBRS_ALL           (1U << 1)
#define MSR_ARCH_CAP_RSBA               (1U << 2)
#define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
#define MSR_ARCH_CAP_SSB_NO             (1U << 4)

/* Supported Hyper-V Enlightenments */
#define HYPERV_FEAT_RELAXED             0
#define HYPERV_FEAT_VAPIC               1
#define HYPERV_FEAT_TIME                2
#define HYPERV_FEAT_CRASH               3
#define HYPERV_FEAT_RESET               4
#define HYPERV_FEAT_VPINDEX             5
#define HYPERV_FEAT_RUNTIME             6
#define HYPERV_FEAT_SYNIC               7
#define HYPERV_FEAT_STIMER              8
#define HYPERV_FEAT_FREQUENCIES         9
#define HYPERV_FEAT_REENLIGHTENMENT     10
#define HYPERV_FEAT_TLBFLUSH            11
#define HYPERV_FEAT_EVMCS               12
#define HYPERV_FEAT_IPI                 13

#ifndef HYPERV_SPINLOCK_NEVER_RETRY
#define HYPERV_SPINLOCK_NEVER_RETRY     0xFFFFFFFF
#endif
#define EXCP00_DIVZ  0
#define EXCP01_DB    1
#define EXCP02_NMI   2
#define EXCP03_INT3  3
#define EXCP04_INTO  4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX  7
#define EXCP08_DBLE  8
#define EXCP09_XERR  9
#define EXCP0A_TSS   10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF   13
#define EXCP0E_PAGE  14
#define EXCP10_COPR  16
#define EXCP11_ALGN  17
#define EXCP12_MCHK  18

#define EXCP_SYSCALL 0x100 /* only happens in user only emulation
                              for syscall instruction */
#define EXCP_VMEXIT  0x100
/* i386-specific interrupt pending bits.  */
#define CPU_INTERRUPT_POLL      CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_SMI       CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE       CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ      CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_SIPI      CPU_INTERRUPT_TGT_INT_1
#define CPU_INTERRUPT_TPR       CPU_INTERRUPT_TGT_INT_2

/* Use a clearer name for this.  */
#define CPU_INTERRUPT_INIT      CPU_INTERRUPT_RESET
/* Instead of computing the condition codes after each x86 instruction,
 * QEMU just stores one operand (called CC_SRC), the result
 * (called CC_DST) and the type of operation (called CC_OP). When the
 * condition codes are needed, the condition codes can be calculated
 * using this information. Condition codes are not generated if they
 * are only needed for conditional branches.
 */
typedef enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
    CC_OP_BMILGW,
    CC_OP_BMILGL,
    CC_OP_BMILGQ,

    CC_OP_ADCX,  /* CC_DST = C, CC_SRC = rest.  */
    CC_OP_ADOX,  /* CC_DST = O, CC_SRC = rest.  */
    CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest.  */

    CC_OP_CLR,    /* Z set, all other flags clear.  */
    CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear.  */

    CC_OP_NB,
} CCOp;
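/*
 * Illustrative only: after an 8-bit ADD the translator records
 *     CC_OP = CC_OP_ADDB;  CC_DST = result;  CC_SRC = src1;
 * and a later consumer can recompute flags lazily, e.g.
 *     ZF = ((uint8_t)CC_DST == 0);
 *     CF = ((uint8_t)CC_DST < (uint8_t)CC_SRC);   -- carry out of the ADD
 * so EFLAGS is only materialized when it is actually needed.
 */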
typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

#define MMREG_UNION(n, bits)        \
    union {                         \
        uint8_t  _b_##n[(bits)/8];  \
        uint16_t _w_##n[(bits)/16]; \
        uint32_t _l_##n[(bits)/32]; \
        uint64_t _q_##n[(bits)/64]; \
        float32  _s_##n[(bits)/32]; \
        float64  _d_##n[(bits)/64]; \
    }

typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
typedef MMREG_UNION(MMXReg, 64)  MMXReg;

typedef struct BNDReg {
    uint64_t lb;
    uint64_t ub;
} BNDReg;

typedef struct BNDCSReg {
    uint64_t cfgu;
    uint64_t sts;
} BNDCSReg;

#define BNDCFG_ENABLE       1ULL
#define BNDCFG_BNDPRESERVE  2ULL
#define BNDCFG_BDIR_MASK    TARGET_PAGE_MASK
#ifdef HOST_WORDS_BIGENDIAN
#define ZMM_B(n) _b_ZMMReg[63 - (n)]
#define ZMM_W(n) _w_ZMMReg[31 - (n)]
#define ZMM_L(n) _l_ZMMReg[15 - (n)]
#define ZMM_S(n) _s_ZMMReg[15 - (n)]
#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
#define ZMM_D(n) _d_ZMMReg[7 - (n)]

#define MMX_B(n) _b_MMXReg[7 - (n)]
#define MMX_W(n) _w_MMXReg[3 - (n)]
#define MMX_L(n) _l_MMXReg[1 - (n)]
#define MMX_S(n) _s_MMXReg[1 - (n)]
#else
#define ZMM_B(n) _b_ZMMReg[n]
#define ZMM_W(n) _w_ZMMReg[n]
#define ZMM_L(n) _l_ZMMReg[n]
#define ZMM_S(n) _s_ZMMReg[n]
#define ZMM_Q(n) _q_ZMMReg[n]
#define ZMM_D(n) _d_ZMMReg[n]

#define MMX_B(n) _b_MMXReg[n]
#define MMX_W(n) _w_MMXReg[n]
#define MMX_L(n) _l_MMXReg[n]
#define MMX_S(n) _s_MMXReg[n]
#endif
#define MMX_Q(n) _q_MMXReg[n]
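/*
 * Illustrative only: the accessors above hide host endianness, so element
 * numbering stays in guest order on any host, e.g. the low 64 bits of
 * vector register 0 are always env->xmm_regs[0].ZMM_Q(0).
 */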
typedef union {
    floatx80 d __attribute__((aligned(16)));
    MMXReg mmx;
} FPReg;
#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS    (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)

#define TARGET_INSN_START_EXTRA_WORDS 1

#define NB_OPMASK_REGS 8

/* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish
 * that APIC ID hasn't been set yet
 */
#define UNASSIGNED_APIC_ID 0xFFFFFFFF
typedef union X86LegacyXSaveArea {
    struct {
        uint16_t fcw;
        uint16_t fsw;
        uint8_t ftw;
        uint8_t reserved;
        uint16_t fpop;
        uint64_t fpip;
        uint64_t fpdp;
        uint32_t mxcsr;
        uint32_t mxcsr_mask;
        FPReg fpregs[8];
        uint8_t xmm_regs[16][16];
    };
    uint8_t data[512];
} X86LegacyXSaveArea;

typedef struct X86XSaveHeader {
    uint64_t xstate_bv;
    uint64_t xcomp_bv;
    uint64_t reserve0;
    uint8_t reserved[40];
} X86XSaveHeader;
/* Ext. save area 2: AVX State */
typedef struct XSaveAVX {
    uint8_t ymmh[16][16];
} XSaveAVX;

/* Ext. save area 3: BNDREG */
typedef struct XSaveBNDREG {
    BNDReg bnd_regs[4];
} XSaveBNDREG;

/* Ext. save area 4: BNDCSR */
typedef union XSaveBNDCSR {
    BNDCSReg bndcsr;
    uint8_t data[64];
} XSaveBNDCSR;

/* Ext. save area 5: Opmask */
typedef struct XSaveOpmask {
    uint64_t opmask_regs[NB_OPMASK_REGS];
} XSaveOpmask;

/* Ext. save area 6: ZMM_Hi256 */
typedef struct XSaveZMM_Hi256 {
    uint8_t zmm_hi256[16][32];
} XSaveZMM_Hi256;

/* Ext. save area 7: Hi16_ZMM */
typedef struct XSaveHi16_ZMM {
    uint8_t hi16_zmm[16][64];
} XSaveHi16_ZMM;

/* Ext. save area 9: PKRU state */
typedef struct XSavePKRU {
    uint32_t pkru;
    uint32_t padding;
} XSavePKRU;
typedef struct X86XSaveArea {
    X86LegacyXSaveArea legacy;
    X86XSaveHeader header;

    /* Extended save areas: */

    /* AVX State: */
    XSaveAVX avx_state;
    uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
    /* MPX State: */
    XSaveBNDREG bndreg_state;
    XSaveBNDCSR bndcsr_state;
    /* AVX-512 State: */
    XSaveOpmask opmask_state;
    XSaveZMM_Hi256 zmm_hi256_state;
    XSaveHi16_ZMM hi16_zmm_state;
    /* PKRU State: */
    XSavePKRU pkru_state;
} X86XSaveArea;
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
typedef enum TPRAccess {
    TPR_ACCESS_READ,
    TPR_ACCESS_WRITE,
} TPRAccess;

/* Cache information data structures: */

enum CacheType {
    DATA_CACHE,
    INSTRUCTION_CACHE,
    UNIFIED_CACHE
};
typedef struct CPUCacheInfo {
    enum CacheType type;
    uint8_t level;
    /* Size in bytes */
    uint32_t size;
    /* Line size, in bytes */
    uint16_t line_size;
    /*
     * Associativity.
     * Note: representation of fully-associative caches is not implemented
     */
    uint8_t associativity;
    /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */
    uint8_t partitions;
    /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */
    uint32_t sets;
    /*
     * Lines per tag.
     * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
     * (Is this synonym to @partitions?)
     */
    uint8_t lines_per_tag;

    /* Self-initializing cache */
    bool self_init;
    /*
     * WBINVD/INVD is not guaranteed to act upon lower level caches of
     * non-originating threads sharing this cache.
     * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0]
     */
    bool no_invd_sharing;
    /*
     * Cache is inclusive of lower cache levels.
     * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1].
     */
    bool inclusive;
    /*
     * A complex function is used to index the cache, potentially using all
     * address bits. CPUID[4].EDX[bit 2].
     */
    bool complex_indexing;
} CPUCacheInfo;
typedef struct CPUCaches {
    CPUCacheInfo *l1d_cache;
    CPUCacheInfo *l1i_cache;
    CPUCacheInfo *l2_cache;
    CPUCacheInfo *l3_cache;
} CPUCaches;
typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_dst;
    target_ulong cc_src;
    target_ulong cc_src2;
    int32_t df;      /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    SegmentCache segs[6]; /* selector values */
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */

    BNDCSReg bndcs_regs;
    uint64_t msr_bndcfgs;

    /* Beginning of state preserved by INIT (dummy marker).  */
    struct {} start_init_save;

    unsigned int fpstt; /* top of stack index */
    uint8_t fptags[8];  /* 0 = valid, 1 = empty */

    /* KVM-only so far */
    uint16_t fpop;
    uint64_t fpip;
    uint64_t fpdp;

    /* emulator internal variables */
    float_status fp_status;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;

    ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];

    XMMReg ymmh_regs[CPU_NB_REGS];

    uint64_t opmask_regs[NB_OPMASK_REGS];
    YMMReg zmmh_regs[CPU_NB_REGS];
    ZMMReg hi16_zmm_regs[CPU_NB_REGS];

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;

#ifdef TARGET_X86_64
    target_ulong kernelgsbase;
#endif

    uint64_t tsc_adjust;
    uint64_t tsc_deadline;

    uint64_t mcg_status;
    uint64_t msr_ia32_misc_enable;
    uint64_t msr_ia32_feature_control;

    uint64_t msr_fixed_ctr_ctrl;
    uint64_t msr_global_ctrl;
    uint64_t msr_global_status;
    uint64_t msr_global_ovf_ctrl;
    uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
    uint64_t msr_gp_counters[MAX_GP_COUNTERS];
    uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];

    uint64_t msr_smi_count;

    /* End of state preserved by INIT (dummy marker).  */
    struct {} end_init_save;

    uint64_t system_time_msr;
    uint64_t wall_clock_msr;
    uint64_t steal_time_msr;
    uint64_t async_pf_en_msr;
    uint64_t pv_eoi_en_msr;

    /* Partition-wide HV MSRs, will be updated only on the first vcpu */
    uint64_t msr_hv_hypercall;
    uint64_t msr_hv_guest_os_id;
    uint64_t msr_hv_tsc;

    /* Per-VCPU HV MSRs */
    uint64_t msr_hv_vapic;
    uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
    uint64_t msr_hv_runtime;
    uint64_t msr_hv_synic_control;
    uint64_t msr_hv_synic_evt_page;
    uint64_t msr_hv_synic_msg_page;
    uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
    uint64_t msr_hv_stimer_config[HV_STIMER_COUNT];
    uint64_t msr_hv_stimer_count[HV_STIMER_COUNT];
    uint64_t msr_hv_reenlightenment_control;
    uint64_t msr_hv_tsc_emulation_control;
    uint64_t msr_hv_tsc_emulation_status;

    uint64_t msr_rtit_ctrl;
    uint64_t msr_rtit_status;
    uint64_t msr_rtit_output_base;
    uint64_t msr_rtit_output_mask;
    uint64_t msr_rtit_cr3_match;
    uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];

    /* exception/interrupt handling */
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
    union {
        struct CPUBreakpoint *cpu_breakpoint[4];
        struct CPUWatchpoint *cpu_watchpoint[4];
    }; /* break/watchpoints for dr[0..3] */
    int old_exception; /* exception in flight */

    uint64_t tsc_offset;

    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint64_t nested_cr3;
    uint32_t nested_pg_mode;

    /* KVM states, automatically cleared on reset */
    uint8_t nmi_injected;
    uint8_t nmi_pending;

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    /* Fields after this point are preserved across CPU reset. */

    /* processor features (e.g. for CPUID insn) */
    /* Minimum level/xlevel/xlevel2, based on CPU model + features */
    uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
    /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
    uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
    /* Actual level/xlevel/xlevel2 value: */
    uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    FeatureWordArray features;
    /* Features that were explicitly enabled/disabled */
    FeatureWordArray user_features;
    uint32_t cpuid_model[12];
    /* Cache information for CPUID.  When legacy-cache=on, the cache data
     * on each CPUID leaf will be different, because we keep compatibility
     * with old QEMU versions.
     */
    CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;

    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[MSR_MTRRcap_VCNT];

    int32_t exception_injected;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t has_error_code;
    uint32_t sipi_vector;

    int64_t user_tsc_khz; /* for sanity check only */
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    void *xsave_buf;
#endif
#if defined(CONFIG_HVF)
    HVFX86EmulatorState *hvf_emul;
#endif

    uint64_t mcg_ext_ctl;
    uint64_t mce_banks[MCE_BANKS_DEF * 4];

    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;

    TPRAccess tpr_access_type;
} CPUX86State;
/**
 * X86CPU:
 * @env: #CPUX86State
 * @migratable: If set, only migratable flags will be accepted when "enforce"
 * mode is used, and only migratable flags will be included in the "host"
 * CPU model.
 *
 * An x86 CPU.
 */
struct X86CPU {
    CPUState parent_obj;

    CPUNegativeOffsetState neg;
    CPUX86State env;

    int hyperv_spinlock_attempts;
    char *hyperv_vendor_id;
    bool hyperv_synic_kvm_only;
    uint64_t hyperv_features;

    bool migratable;
    bool migrate_smi_count;
    bool max_features; /* Enable all supported features automatically */

    /* Enables publishing of TSC increment and Local APIC bus frequencies to
     * the guest OS in CPUID page 0x40000010, the same way that VMWare does. */
    bool vmware_cpuid_freq;

    /* if true the CPUID code directly forwards host cache leaves to the guest */
    bool cache_info_passthrough;

    /* if true the CPUID code directly forwards
     * host monitor/mwait leaves to the guest */
    struct {
        uint32_t eax;
        uint32_t ebx;
        uint32_t ecx;
        uint32_t edx;
    } mwait;

    /* Features that were filtered out because of missing host capabilities */
    uint32_t filtered_features[FEATURE_WORDS];

    /* Enable PMU CPUID bits. This can't be enabled by default yet because
     * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
     * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
     * capabilities) directly to the guest.
     */
    bool enable_pmu;

    /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
     * disabled by default to avoid breaking migration between QEMU with
     * different LMCE configurations.
     */
    bool enable_lmce;

    /* Compatibility bits for old machine types.
     * If true, present a virtual l3 cache to the VM; the vcpus in the same
     * virtual socket share that virtual l3 cache.
     */
    bool enable_l3_cache;

    /* Compatibility bits for old machine types.
     * If true present the old cache topology information
     */
    bool legacy_cache;

    /* Compatibility bits for old machine types: */
    bool enable_cpuid_0xb;

    /* Enable auto level-increase for all CPUID leaves */
    bool full_cpuid_auto_level;

    /* Enable auto level-increase for the Intel Processor Trace leaf */
    bool intel_pt_auto_level;

    /* if true fill the top bits of the MTRR_PHYSMASKn variable range */
    bool fill_mtrr_mask;

    /* if true override the phys_bits value with a value read from the host */
    bool host_phys_bits;

    /* if set, limit maximum value for phys_bits when host_phys_bits is true */
    uint8_t host_phys_bits_limit;

    /* Stop SMI delivery for migration compatibility with old machines */
    bool kvm_no_smi_migration;

    /* Number of physical address bits supported */
    uint32_t phys_bits;

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct DeviceState *apic_state;
    struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
    Notifier machine_done;

    struct kvm_msrs *kvm_msr_buf;

    int32_t node_id; /* NUMA node this CPU belongs to */
};
#ifndef CONFIG_USER_ONLY
extern struct VMStateDescription vmstate_x86_cpu;
#endif
/**
 * x86_cpu_do_interrupt:
 * @cpu: vCPU the interrupt is to be handled by.
 */
void x86_cpu_do_interrupt(CPUState *cpu);
bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);

int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, void *opaque);
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, void *opaque);
int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 void *opaque);
int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 void *opaque);

void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                                Error **errp);

void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);

hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);

int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

void x86_cpu_exec_enter(CPUState *cpu);
void x86_cpu_exec_exit(CPUState *cpu);

void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);

int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

void cpu_sync_bndcs_hflags(CPUX86State *env);
/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        if (seg_reg == R_SS) {
            int cpl = (flags >> DESC_DPL_SHIFT) & 3;
#if HF_CPL_MASK != 3
#error HF_CPL_MASK is hardcoded
#endif
            env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
            /* Possibly switch between BNDCFGS and BNDCFGU */
            cpu_sync_bndcs_hflags(env);
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
                                               uint8_t sipi_vector)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    env->eip = 0;
    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
                           sipi_vector << 12,
                           env->segs[R_CS].limit,
                           env->segs[R_CS].flags);
    cs->halted = 0;
}
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags);
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);

/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr);
void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr);
/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx);
void cpu_clear_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);

bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
#ifndef CONFIG_USER_ONLY
static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
{
    return !!attrs.secure;
}

static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
{
    return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
}
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
#endif

void breakpoint_handler(CPUState *cs);
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);

uint64_t cpu_get_tsc(CPUX86State *env);

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define TCG_PHYS_ADDR_BITS 40
# else
# define TCG_PHYS_ADDR_BITS 36
# endif

#define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS)

#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_X86_CPU
#ifdef TARGET_X86_64
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64")
#else
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
#endif

#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _ksmap
#define MMU_MODE1_SUFFIX _user
#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
#define MMU_KSMAP_IDX   0
#define MMU_USER_IDX    1
#define MMU_KNOSMAP_IDX 2
static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
        (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
}
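/*
 * Illustrative only: CPL 3 code always translates through MMU_USER_IDX;
 * ring-0 accesses use MMU_KNOSMAP_IDX unless SMAP is enforced (SMAP enabled
 * and EFLAGS.AC clear), in which case MMU_KSMAP_IDX makes user pages fault.
 */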
static inline int cpu_mmu_index_kernel(CPUX86State *env)
{
    return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
        ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
}
1726 #define CC_DST (env->cc_dst)
1727 #define CC_SRC (env->cc_src)
1728 #define CC_SRC2 (env->cc_src2)
1729 #define CC_OP (env->cc_op)
1731 /* n must be a constant to be efficient */
1732 static inline target_long
lshift(target_long x
, int n
)
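/*
 * Illustrative only: lshift(x, 3) == x << 3 while lshift(x, -3) == x >> 3,
 * so a single helper covers both shift directions when 'n' is a
 * compile-time constant of either sign.
 */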
1742 #define FT0 (env->ft0)
1743 #define ST0 (env->fpregs[env->fpstt].d)
1744 #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
1748 void tcg_x86_init(void);
1750 typedef CPUX86State CPUArchState
;
1751 typedef X86CPU ArchCPU
;
1753 #include "exec/cpu-all.h"
1756 #if !defined(CONFIG_USER_ONLY)
1757 #include "hw/i386/apic.h"
static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    *cs_base = env->segs[R_CS].base;
    *pc = *cs_base + env->eip;
    *flags = env->hflags |
        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
}
void do_cpu_init(X86CPU *cpu);
void do_cpu_sipi(X86CPU *cpu);

#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags);
void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
                                      uintptr_t retaddr);
void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
                                       int error_code);
void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
                                          int error_code, uintptr_t retaddr);
void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
                                   int error_code, int next_eip_addend);

extern const uint8_t parity_table[256];
uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
static inline uint32_t cpu_compute_eflags(CPUX86State *env)
{
    uint32_t eflags = env->eflags;
    if (tcg_enabled()) {
        eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
    }
    return eflags;
}
/* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
 * after generating a call to a helper that uses this.
 */
static inline void cpu_load_eflags(CPUX86State *env, int eflags,
                                   int update_mask)
{
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    CC_OP = CC_OP_EFLAGS;
    env->df = 1 - (2 * ((eflags >> 10) & 1));
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
}
/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits? */
static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}
static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
{
    return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
}
static inline int32_t x86_get_a20_mask(CPUX86State *env)
{
    if (env->hflags & HF_SMM_MASK) {
        return -1;
    } else {
        return env->a20_mask;
    }
}

void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
{
    env->mxcsr = mxcsr;
    if (tcg_enabled()) {
        update_mxcsr_status(env);
    }
}

static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
{
    env->fpuc = fpuc;
    if (tcg_enabled()) {
        update_fp_status(env);
    }
}
void helper_lock_init(void);

void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param, uintptr_t retaddr);
void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
                              uint64_t exit_info_1, uintptr_t retaddr);
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1);
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);

void do_smm_enter(X86CPU *cpu);

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
                                   TPRAccess access);
/* Change the value of a KVM-specific default
 *
 * If value is NULL, no default will be set and the original
 * value from the CPU model table will be kept.
 *
 * It is valid to call this function only for properties that
 * are already present in the kvm_default_props table.
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value);

/* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg);

void enable_compat_apic_id_mode(void);

#define APIC_DEFAULT_ADDRESS 0xfee00000
#define APIC_SPACE_SIZE      0x100000
void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);

bool cpu_is_bsp(X86CPU *cpu);

void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
void x86_update_hflags(CPUX86State *env);
static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
{
    return !!(cpu->hyperv_features & BIT(feat));
}

#endif /* I386_CPU_H */