/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef I386_CPU_H
#define I386_CPU_H

#include "sysemu/tcg.h"
#include "cpu-qom.h"
#include "hyperv-proto.h"
#include "exec/cpu-defs.h"
#include "qapi/qapi-types-common.h"

/* The x86 has a strong memory model with some store-after-load re-ordering */
#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
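
/*
 * Illustrative note (editor's addition, not in the original header): under
 * x86-TSO the one reordering a guest may observe is a store followed by a
 * later load to a different location.  With X = Y = 0 initially:
 *
 *     CPU0: X = 1; r0 = Y;            CPU1: Y = 1; r1 = X;
 *
 * the outcome r0 == 0 && r1 == 0 is architecturally permitted, which is why
 * TCG_MO_ST_LD is the one barrier removed from TCG_MO_ALL above.
 */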

/* Maximum instruction code size */
#define TARGET_MAX_INSN_SIZE 16

/* Support for self-modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#ifdef TARGET_X86_64
#define I386_ELF_MACHINE EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
#else
#define I386_ELF_MACHINE EM_386
#define ELF_MACHINE_UNAME "i686"
#endif

enum {
    R_EAX = 0,
    R_ECX = 1,
    R_EDX = 2,
    R_EBX = 3,
    R_ESP = 4,
    R_EBP = 5,
    R_ESI = 6,
    R_EDI = 7,
    R_R8 = 8,
    R_R9 = 9,
    R_R10 = 10,
    R_R11 = 11,
    R_R12 = 12,
    R_R13 = 13,
    R_R14 = 14,
    R_R15 = 15,

    R_AL = 0,
    R_CL = 1,
    R_DL = 2,
    R_BL = 3,
    R_AH = 4,
    R_CH = 5,
    R_DH = 6,
    R_BH = 7,
};

typedef enum X86Seg {
    R_ES = 0,
    R_CS = 1,
    R_SS = 2,
    R_DS = 3,
    R_FS = 4,
    R_GS = 5,
    R_LDTR = 6,
    R_TR = 7,
} X86Seg;

/* segment descriptor fields */
#define DESC_G_SHIFT 23
#define DESC_G_MASK (1 << DESC_G_SHIFT)
#define DESC_B_SHIFT 22
#define DESC_B_MASK (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT 21 /* x86_64 only: 64-bit code segment */
#define DESC_L_MASK (1 << DESC_L_SHIFT)
#define DESC_AVL_SHIFT 20
#define DESC_AVL_MASK (1 << DESC_AVL_SHIFT)
#define DESC_P_SHIFT 15
#define DESC_P_MASK (1 << DESC_P_SHIFT)
#define DESC_DPL_SHIFT 13
#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
#define DESC_S_SHIFT 12
#define DESC_S_MASK (1 << DESC_S_SHIFT)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK (1 << 8)

#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK (1 << 10) /* code: conforming */
#define DESC_R_MASK (1 << 9) /* code: readable */

#define DESC_E_MASK (1 << 10) /* data: expansion direction */
#define DESC_W_MASK (1 << 9) /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)

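/*
 * Illustrative sketch (editor's addition): the DESC_* shifts and masks
 * above decode the flags dword of a segment descriptor (the same layout
 * cached in SegmentCache.flags further below).  The helper name is
 * hypothetical, not part of QEMU's API; field extraction is a plain
 * shift-and-mask:
 */
static inline unsigned int desc_dpl_example(uint32_t flags)
{
    /* Descriptor Privilege Level lives in bits 14:13 of the flags dword */
    return (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
}
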
/* eflags masks */
#define CC_C 0x0001
#define CC_P 0x0004
#define CC_A 0x0010
#define CC_Z 0x0040
#define CC_S 0x0080
#define CC_O 0x0800

#define TF_SHIFT 8
#define IOPL_SHIFT 12
#define VM_SHIFT 17

#define TF_MASK 0x00000100
#define IF_MASK 0x00000200
#define DF_MASK 0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK 0x00004000
#define RF_MASK 0x00010000
#define VM_MASK 0x00020000
#define AC_MASK 0x00040000
#define VIF_MASK 0x00080000
#define VIP_MASK 0x00100000
#define ID_MASK 0x00200000

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the INHIBIT_IRQ, SMM and GUEST flags are not redundant. We
   avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
   positions to ease ORing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16-bit or 32-bit segments */
#define HF_CS32_SHIFT 4
#define HF_SS32_SHIFT 5
/* zero base for DS, ES and SS: can be '0' only in 32-bit CS segment */
#define HF_ADDSEG_SHIFT 6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT 7
#define HF_TF_SHIFT 8 /* must be same as eflags */
#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT 10
#define HF_TS_SHIFT 11
#define HF_IOPL_SHIFT 12 /* must be same as eflags */
#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64-bit code segment */
#define HF_RF_SHIFT 16 /* must be same as eflags */
#define HF_VM_SHIFT 17 /* must be same as eflags */
#define HF_AC_SHIFT 18 /* must be same as eflags */
#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
#define HF_GUEST_SHIFT 21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
#define HF_IOBPT_SHIFT 24 /* an I/O breakpoint is enabled */
#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */

#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK (1 << HF_PE_SHIFT)
#define HF_TF_MASK (1 << HF_TF_SHIFT)
#define HF_MP_MASK (1 << HF_MP_SHIFT)
#define HF_EM_MASK (1 << HF_EM_SHIFT)
#define HF_TS_MASK (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_RF_MASK (1 << HF_RF_SHIFT)
#define HF_VM_MASK (1 << HF_VM_SHIFT)
#define HF_AC_MASK (1 << HF_AC_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_GUEST_MASK (1 << HF_GUEST_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)

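/*
 * Illustrative note (editor's addition): because HF_TF, HF_IOPL, HF_RF,
 * HF_VM and HF_AC sit at the same bit positions as their eflags
 * counterparts, merging hflags with the live eflags bits needs no
 * shifting, e.g. (assumed usage, mirroring cpu_get_tb_cpu_state()):
 *
 *     flags = env->hflags
 *         | (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
 */
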
/* hflags2 */

#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
#define HF2_NPT_SHIFT 6 /* Nested Paging enabled */
#define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */

#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
#define HF2_NPT_MASK (1 << HF2_NPT_SHIFT)
#define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK (1U << 0)
#define CR0_MP_MASK (1U << 1)
#define CR0_EM_MASK (1U << 2)
#define CR0_TS_MASK (1U << 3)
#define CR0_ET_MASK (1U << 4)
#define CR0_NE_MASK (1U << 5)
#define CR0_WP_MASK (1U << 16)
#define CR0_AM_MASK (1U << 18)
#define CR0_PG_MASK (1U << 31)

#define CR4_VME_MASK (1U << 0)
#define CR4_PVI_MASK (1U << 1)
#define CR4_TSD_MASK (1U << 2)
#define CR4_DE_MASK (1U << 3)
#define CR4_PSE_MASK (1U << 4)
#define CR4_PAE_MASK (1U << 5)
#define CR4_MCE_MASK (1U << 6)
#define CR4_PGE_MASK (1U << 7)
#define CR4_PCE_MASK (1U << 8)
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1U << 10)
#define CR4_LA57_MASK (1U << 12)
#define CR4_VMXE_MASK (1U << 13)
#define CR4_SMXE_MASK (1U << 14)
#define CR4_FSGSBASE_MASK (1U << 16)
#define CR4_PCIDE_MASK (1U << 17)
#define CR4_OSXSAVE_MASK (1U << 18)
#define CR4_SMEP_MASK (1U << 20)
#define CR4_SMAP_MASK (1U << 21)
#define CR4_PKE_MASK (1U << 22)

#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
#define DR6_FIXED_1 0xffff0ff0

#define DR7_GD (1 << 13)
#define DR7_TYPE_SHIFT 16
#define DR7_LEN_SHIFT 18
#define DR7_FIXED_1 0x00000400
#define DR7_GLOBAL_BP_MASK 0xaa
#define DR7_LOCAL_BP_MASK 0x55
#define DR7_MAX_BP 4
#define DR7_TYPE_BP_INST 0x0
#define DR7_TYPE_DATA_WR 0x1
#define DR7_TYPE_IO_RW 0x2
#define DR7_TYPE_DATA_RW 0x3

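/*
 * Illustrative sketch (editor's addition): DR7 packs a 2-bit type and a
 * 2-bit length per breakpoint, four control bits at a time starting at
 * bit 16.  Hypothetical helper, shown only to document the layout:
 */
static inline int dr7_bp_type_example(target_ulong dr7, int index)
{
    /* type bits for breakpoint 'index' sit at DR7_TYPE_SHIFT + 4 * index */
    return (dr7 >> (DR7_TYPE_SHIFT + index * 4)) & 3;
}
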
#define PG_PRESENT_BIT 0
#define PG_RW_BIT 1
#define PG_USER_BIT 2
#define PG_PWT_BIT 3
#define PG_PCD_BIT 4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT 6
#define PG_PSE_BIT 7
#define PG_GLOBAL_BIT 8
#define PG_PSE_PAT_BIT 12
#define PG_PKRU_BIT 59
#define PG_NX_BIT 63

#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
#define PG_RW_MASK (1 << PG_RW_BIT)
#define PG_USER_MASK (1 << PG_USER_BIT)
#define PG_PWT_MASK (1 << PG_PWT_BIT)
#define PG_PCD_MASK (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
#define PG_ADDRESS_MASK 0x000ffffffffff000LL
#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
#define PG_HI_USER_MASK 0x7ff0000000000000LL
#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
#define PG_NX_MASK (1ULL << PG_NX_BIT)

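/*
 * Illustrative sketch (editor's addition): a 64-bit PAE/long-mode page
 * table entry combines the flag bits above with a 40-bit frame address.
 * Hypothetical helper, shown only for the bit layout:
 */
static inline uint64_t pte_phys_addr_example(uint64_t pte)
{
    /* valid only if present; strip the low flags and high NX/PKRU bits */
    return (pte & PG_PRESENT_MASK) ? (pte & PG_ADDRESS_MASK) : 0;
}
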
#define PG_ERROR_W_BIT 1

#define PG_ERROR_P_MASK 0x01
#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK 0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK 0x10
#define PG_ERROR_PK_MASK 0x20

#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
#define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */

#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
#define MCE_BANKS_DEF 10

#define MCG_CAP_BANKS_MASK 0xff

#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
#define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */

#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */

#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */

/* MISC register defines */
#define MCM_ADDR_SEGOFF 0 /* segment offset */
#define MCM_ADDR_LINEAR 1 /* linear address */
#define MCM_ADDR_PHYS 2 /* physical address */
#define MCM_ADDR_MEM 3 /* memory address */
#define MCM_ADDR_GENERIC 7 /* generic */

#define MSR_IA32_TSC 0x10
#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_EXTD (1 << 10)
#define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_TSC_ADJUST 0x0000003b
#define MSR_IA32_SPEC_CTRL 0x48
#define MSR_VIRT_SSBD 0xc001011f
#define MSR_IA32_PRED_CMD 0x49
#define MSR_IA32_CORE_CAPABILITY 0xcf

#define MSR_IA32_ARCH_CAPABILITIES 0x10a
#define ARCH_CAP_TSX_CTRL_MSR (1<<7)

#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0

#define FEATURE_CONTROL_LOCKED (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define FEATURE_CONTROL_LMCE (1<<20)

#define MSR_P6_PERFCTR0 0xc1

#define MSR_IA32_SMBASE 0x9e
#define MSR_SMI_COUNT 0x34
#define MSR_MTRRcap 0xfe
#define MSR_MTRRcap_VCNT 8
#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)

#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176

#define MSR_MCG_CAP 0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b
#define MSR_MCG_EXT_CTL 0x4d0

#define MSR_P6_EVNTSEL0 0x186

#define MSR_IA32_PERF_STATUS 0x198

#define MSR_IA32_MISC_ENABLE 0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT 1
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)

#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)

#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)

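/*
 * Illustrative note (editor's addition): MSR_MTRRphysIndex() inverts the
 * Base/Mask pairing above; clearing bit 0 maps a Mask MSR back onto its
 * Base MSR, so for example:
 *
 *     MSR_MTRRphysIndex(MSR_MTRRphysBase(3)) == 3   (0x206 -> 3)
 *     MSR_MTRRphysIndex(MSR_MTRRphysMask(3)) == 3   (0x207 -> 3)
 */
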
#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
#define MSR_MTRRfix4K_C0000 0x268
#define MSR_MTRRfix4K_C8000 0x269
#define MSR_MTRRfix4K_D0000 0x26a
#define MSR_MTRRfix4K_D8000 0x26b
#define MSR_MTRRfix4K_E0000 0x26c
#define MSR_MTRRfix4K_E8000 0x26d
#define MSR_MTRRfix4K_F0000 0x26e
#define MSR_MTRRfix4K_F8000 0x26f

#define MSR_PAT 0x277

#define MSR_MTRRdefType 0x2ff

#define MSR_CORE_PERF_FIXED_CTR0 0x309
#define MSR_CORE_PERF_FIXED_CTR1 0x30a
#define MSR_CORE_PERF_FIXED_CTR2 0x30b
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390

#define MSR_MC0_CTL 0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR 0x402
#define MSR_MC0_MISC 0x403

#define MSR_IA32_RTIT_OUTPUT_BASE 0x560
#define MSR_IA32_RTIT_OUTPUT_MASK 0x561
#define MSR_IA32_RTIT_CTL 0x570
#define MSR_IA32_RTIT_STATUS 0x571
#define MSR_IA32_RTIT_CR3_MATCH 0x572
#define MSR_IA32_RTIT_ADDR0_A 0x580
#define MSR_IA32_RTIT_ADDR0_B 0x581
#define MSR_IA32_RTIT_ADDR1_A 0x582
#define MSR_IA32_RTIT_ADDR1_B 0x583
#define MSR_IA32_RTIT_ADDR2_A 0x584
#define MSR_IA32_RTIT_ADDR2_B 0x585
#define MSR_IA32_RTIT_ADDR3_A 0x586
#define MSR_IA32_RTIT_ADDR3_B 0x587
#define MAX_RTIT_ADDRS 8

#define MSR_EFER 0xc0000080

#define MSR_EFER_SCE (1 << 0)
#define MSR_EFER_LME (1 << 8)
#define MSR_EFER_LMA (1 << 10)
#define MSR_EFER_NXE (1 << 11)
#define MSR_EFER_SVME (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
#define MSR_FMASK 0xc0000084
#define MSR_FSBASE 0xc0000100
#define MSR_GSBASE 0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
#define MSR_TSC_AUX 0xc0000103

#define MSR_VM_HSAVE_PA 0xc0010117

#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_XSS 0x00000da0
#define MSR_IA32_UMWAIT_CONTROL 0xe1

#define MSR_IA32_VMX_BASIC 0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
#define MSR_IA32_VMX_MISC 0x00000485
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
#define MSR_IA32_VMX_VMFUNC 0x00000491

#define XSTATE_FP_BIT 0
#define XSTATE_SSE_BIT 1
#define XSTATE_YMM_BIT 2
#define XSTATE_BNDREGS_BIT 3
#define XSTATE_BNDCSR_BIT 4
#define XSTATE_OPMASK_BIT 5
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
#define XSTATE_PKRU_BIT 9

#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
#define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
#define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
#define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)

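/*
 * Illustrative sketch (editor's addition): AVX-512 state spans three XSAVE
 * components that are enabled and tested as a group.  Hypothetical helper
 * over an XCR0-style component bit vector:
 */
static inline bool xcr0_has_avx512_example(uint64_t xcr0)
{
    const uint64_t avx512 = XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK |
                            XSTATE_Hi16_ZMM_MASK;
    /* all three components must be set for AVX-512 state to be usable */
    return (xcr0 & avx512) == avx512;
}
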
/* CPUID feature words */
typedef enum FeatureWord {
    FEAT_1_EDX, /* CPUID[1].EDX */
    FEAT_1_ECX, /* CPUID[1].ECX */
    FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
    FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
    FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
    FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */
    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
    FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
    FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
    FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
    FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
    FEAT_HYPERV_EAX, /* CPUID[4000_0003].EAX */
    FEAT_HYPERV_EBX, /* CPUID[4000_0003].EBX */
    FEAT_HYPERV_EDX, /* CPUID[4000_0003].EDX */
    FEAT_HV_RECOMM_EAX, /* CPUID[4000_0004].EAX */
    FEAT_HV_NESTED_EAX, /* CPUID[4000_000A].EAX */
    FEAT_SVM, /* CPUID[8000_000A].EDX */
    FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
    FEAT_6_EAX, /* CPUID[6].EAX */
    FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
    FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
    FEAT_ARCH_CAPABILITIES,
    FEAT_CORE_CAPABILITY,
    FEAT_VMX_PROCBASED_CTLS,
    FEAT_VMX_SECONDARY_CTLS,
    FEAT_VMX_PINBASED_CTLS,
    FEAT_VMX_EXIT_CTLS,
    FEAT_VMX_ENTRY_CTLS,
    FEAT_VMX_MISC,
    FEAT_VMX_EPT_VPID_CAPS,
    FEAT_VMX_BASIC,
    FEAT_VMX_VMFUNC,
    FEATURE_WORDS,
} FeatureWord;

typedef uint64_t FeatureWordArray[FEATURE_WORDS];

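/*
 * Illustrative note (editor's addition): a FeatureWordArray is indexed by
 * FeatureWord, one 64-bit word of feature bits per CPUID leaf/register,
 * e.g. (assumed usage, mirroring env->features below):
 *
 *     FeatureWordArray features;
 *     bool has_sse2 = features[FEAT_1_EDX] & CPUID_SSE2;
 */
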
/* cpuid_features bits */
#define CPUID_FP87 (1U << 0)
#define CPUID_VME (1U << 1)
#define CPUID_DE (1U << 2)
#define CPUID_PSE (1U << 3)
#define CPUID_TSC (1U << 4)
#define CPUID_MSR (1U << 5)
#define CPUID_PAE (1U << 6)
#define CPUID_MCE (1U << 7)
#define CPUID_CX8 (1U << 8)
#define CPUID_APIC (1U << 9)
#define CPUID_SEP (1U << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1U << 12)
#define CPUID_PGE (1U << 13)
#define CPUID_MCA (1U << 14)
#define CPUID_CMOV (1U << 15)
#define CPUID_PAT (1U << 16)
#define CPUID_PSE36 (1U << 17)
#define CPUID_PN (1U << 18)
#define CPUID_CLFLUSH (1U << 19)
#define CPUID_DTS (1U << 21)
#define CPUID_ACPI (1U << 22)
#define CPUID_MMX (1U << 23)
#define CPUID_FXSR (1U << 24)
#define CPUID_SSE (1U << 25)
#define CPUID_SSE2 (1U << 26)
#define CPUID_SS (1U << 27)
#define CPUID_HT (1U << 28)
#define CPUID_TM (1U << 29)
#define CPUID_IA64 (1U << 30)
#define CPUID_PBE (1U << 31)

#define CPUID_EXT_SSE3 (1U << 0)
#define CPUID_EXT_PCLMULQDQ (1U << 1)
#define CPUID_EXT_DTES64 (1U << 2)
#define CPUID_EXT_MONITOR (1U << 3)
#define CPUID_EXT_DSCPL (1U << 4)
#define CPUID_EXT_VMX (1U << 5)
#define CPUID_EXT_SMX (1U << 6)
#define CPUID_EXT_EST (1U << 7)
#define CPUID_EXT_TM2 (1U << 8)
#define CPUID_EXT_SSSE3 (1U << 9)
#define CPUID_EXT_CID (1U << 10)
#define CPUID_EXT_FMA (1U << 12)
#define CPUID_EXT_CX16 (1U << 13)
#define CPUID_EXT_XTPR (1U << 14)
#define CPUID_EXT_PDCM (1U << 15)
#define CPUID_EXT_PCID (1U << 17)
#define CPUID_EXT_DCA (1U << 18)
#define CPUID_EXT_SSE41 (1U << 19)
#define CPUID_EXT_SSE42 (1U << 20)
#define CPUID_EXT_X2APIC (1U << 21)
#define CPUID_EXT_MOVBE (1U << 22)
#define CPUID_EXT_POPCNT (1U << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
#define CPUID_EXT_AES (1U << 25)
#define CPUID_EXT_XSAVE (1U << 26)
#define CPUID_EXT_OSXSAVE (1U << 27)
#define CPUID_EXT_AVX (1U << 28)
#define CPUID_EXT_F16C (1U << 29)
#define CPUID_EXT_RDRAND (1U << 30)
#define CPUID_EXT_HYPERVISOR (1U << 31)

#define CPUID_EXT2_FPU (1U << 0)
#define CPUID_EXT2_VME (1U << 1)
#define CPUID_EXT2_DE (1U << 2)
#define CPUID_EXT2_PSE (1U << 3)
#define CPUID_EXT2_TSC (1U << 4)
#define CPUID_EXT2_MSR (1U << 5)
#define CPUID_EXT2_PAE (1U << 6)
#define CPUID_EXT2_MCE (1U << 7)
#define CPUID_EXT2_CX8 (1U << 8)
#define CPUID_EXT2_APIC (1U << 9)
#define CPUID_EXT2_SYSCALL (1U << 11)
#define CPUID_EXT2_MTRR (1U << 12)
#define CPUID_EXT2_PGE (1U << 13)
#define CPUID_EXT2_MCA (1U << 14)
#define CPUID_EXT2_CMOV (1U << 15)
#define CPUID_EXT2_PAT (1U << 16)
#define CPUID_EXT2_PSE36 (1U << 17)
#define CPUID_EXT2_MP (1U << 19)
#define CPUID_EXT2_NX (1U << 20)
#define CPUID_EXT2_MMXEXT (1U << 22)
#define CPUID_EXT2_MMX (1U << 23)
#define CPUID_EXT2_FXSR (1U << 24)
#define CPUID_EXT2_FFXSR (1U << 25)
#define CPUID_EXT2_PDPE1GB (1U << 26)
#define CPUID_EXT2_RDTSCP (1U << 27)
#define CPUID_EXT2_LM (1U << 29)
#define CPUID_EXT2_3DNOWEXT (1U << 30)
#define CPUID_EXT2_3DNOW (1U << 31)

/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
                                CPUID_EXT2_DE | CPUID_EXT2_PSE | \
                                CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
                                CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
                                CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
                                CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
                                CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
                                CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
                                CPUID_EXT2_MMX | CPUID_EXT2_FXSR)

#define CPUID_EXT3_LAHF_LM (1U << 0)
#define CPUID_EXT3_CMP_LEG (1U << 1)
#define CPUID_EXT3_SVM (1U << 2)
#define CPUID_EXT3_EXTAPIC (1U << 3)
#define CPUID_EXT3_CR8LEG (1U << 4)
#define CPUID_EXT3_ABM (1U << 5)
#define CPUID_EXT3_SSE4A (1U << 6)
#define CPUID_EXT3_MISALIGNSSE (1U << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
#define CPUID_EXT3_OSVW (1U << 9)
#define CPUID_EXT3_IBS (1U << 10)
#define CPUID_EXT3_XOP (1U << 11)
#define CPUID_EXT3_SKINIT (1U << 12)
#define CPUID_EXT3_WDT (1U << 13)
#define CPUID_EXT3_LWP (1U << 15)
#define CPUID_EXT3_FMA4 (1U << 16)
#define CPUID_EXT3_TCE (1U << 17)
#define CPUID_EXT3_NODEID (1U << 19)
#define CPUID_EXT3_TBM (1U << 21)
#define CPUID_EXT3_TOPOEXT (1U << 22)
#define CPUID_EXT3_PERFCORE (1U << 23)
#define CPUID_EXT3_PERFNB (1U << 24)

#define CPUID_SVM_NPT (1U << 0)
#define CPUID_SVM_LBRV (1U << 1)
#define CPUID_SVM_SVMLOCK (1U << 2)
#define CPUID_SVM_NRIPSAVE (1U << 3)
#define CPUID_SVM_TSCSCALE (1U << 4)
#define CPUID_SVM_VMCBCLEAN (1U << 5)
#define CPUID_SVM_FLUSHASID (1U << 6)
#define CPUID_SVM_DECODEASSIST (1U << 7)
#define CPUID_SVM_PAUSEFILTER (1U << 10)
#define CPUID_SVM_PFTHRESHOLD (1U << 12)

/* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
/* 1st Group of Advanced Bit Manipulation Extensions */
#define CPUID_7_0_EBX_BMI1 (1U << 3)
/* Hardware Lock Elision */
#define CPUID_7_0_EBX_HLE (1U << 4)
/* Intel Advanced Vector Extensions 2 */
#define CPUID_7_0_EBX_AVX2 (1U << 5)
/* Supervisor-mode Execution Prevention */
#define CPUID_7_0_EBX_SMEP (1U << 7)
/* 2nd Group of Advanced Bit Manipulation Extensions */
#define CPUID_7_0_EBX_BMI2 (1U << 8)
/* Enhanced REP MOVSB/STOSB */
#define CPUID_7_0_EBX_ERMS (1U << 9)
/* Invalidate Process-Context Identifier */
#define CPUID_7_0_EBX_INVPCID (1U << 10)
/* Restricted Transactional Memory */
#define CPUID_7_0_EBX_RTM (1U << 11)
/* Memory Protection Extension */
#define CPUID_7_0_EBX_MPX (1U << 14)
/* AVX-512 Foundation */
#define CPUID_7_0_EBX_AVX512F (1U << 16)
/* AVX-512 Doubleword & Quadword Instruction */
#define CPUID_7_0_EBX_AVX512DQ (1U << 17)
/* Read Random SEED */
#define CPUID_7_0_EBX_RDSEED (1U << 18)
/* ADCX and ADOX instructions */
#define CPUID_7_0_EBX_ADX (1U << 19)
/* Supervisor Mode Access Prevention */
#define CPUID_7_0_EBX_SMAP (1U << 20)
/* AVX-512 Integer Fused Multiply Add */
#define CPUID_7_0_EBX_AVX512IFMA (1U << 21)
/* Persistent Commit */
#define CPUID_7_0_EBX_PCOMMIT (1U << 22)
/* Flush a Cache Line Optimized */
#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23)
/* Cache Line Write Back */
#define CPUID_7_0_EBX_CLWB (1U << 24)
/* Intel Processor Trace */
#define CPUID_7_0_EBX_INTEL_PT (1U << 25)
/* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512PF (1U << 26)
/* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512ER (1U << 27)
/* AVX-512 Conflict Detection */
#define CPUID_7_0_EBX_AVX512CD (1U << 28)
/* SHA1/SHA256 Instruction Extensions */
#define CPUID_7_0_EBX_SHA_NI (1U << 29)
/* AVX-512 Byte and Word Instructions */
#define CPUID_7_0_EBX_AVX512BW (1U << 30)
/* AVX-512 Vector Length Extensions */
#define CPUID_7_0_EBX_AVX512VL (1U << 31)

/* AVX-512 Vector Byte Manipulation Instruction */
#define CPUID_7_0_ECX_AVX512_VBMI (1U << 1)
/* User-Mode Instruction Prevention */
#define CPUID_7_0_ECX_UMIP (1U << 2)
/* Protection Keys for User-mode Pages */
#define CPUID_7_0_ECX_PKU (1U << 3)
/* OS Enable Protection Keys */
#define CPUID_7_0_ECX_OSPKE (1U << 4)
/* UMONITOR/UMWAIT/TPAUSE Instructions */
#define CPUID_7_0_ECX_WAITPKG (1U << 5)
/* Additional AVX-512 Vector Byte Manipulation Instruction */
#define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6)
/* Galois Field New Instructions */
#define CPUID_7_0_ECX_GFNI (1U << 8)
/* Vector AES Instructions */
#define CPUID_7_0_ECX_VAES (1U << 9)
/* Carry-Less Multiplication Quadword */
#define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
/* Vector Neural Network Instructions */
#define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
/* Support for VPOPCNT[B,W] and VPSHUFBITQMB */
#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
/* POPCNT for vectors of DW/QW */
#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14)
/* 5-level Page Tables */
#define CPUID_7_0_ECX_LA57 (1U << 16)
/* Read Processor ID */
#define CPUID_7_0_ECX_RDPID (1U << 22)
/* Cache Line Demote Instruction */
#define CPUID_7_0_ECX_CLDEMOTE (1U << 25)
/* Move Doubleword as Direct Store Instruction */
#define CPUID_7_0_ECX_MOVDIRI (1U << 27)
/* Move 64 Bytes as Direct Store Instruction */
#define CPUID_7_0_ECX_MOVDIR64B (1U << 28)

/* AVX512 Neural Network Instructions */
#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2)
/* AVX512 Multiply Accumulation Single Precision */
#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
/* Speculation Control */
#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
/* Single Thread Indirect Branch Predictors */
#define CPUID_7_0_EDX_STIBP (1U << 27)
/* Arch Capabilities */
#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29)
/* Core Capability */
#define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30)
/* Speculative Store Bypass Disable */
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31)

/* AVX512 BFloat16 Instruction */
#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)

/* CLZERO instruction */
#define CPUID_8000_0008_EBX_CLZERO (1U << 0)
/* Always save/restore FP error pointers */
#define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2)
/* Write back and do not invalidate cache */
#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9)
/* Indirect Branch Prediction Barrier */
#define CPUID_8000_0008_EBX_IBPB (1U << 12)

#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC (1U << 1)
#define CPUID_XSAVE_XGETBV1 (1U << 2)
#define CPUID_XSAVE_XSAVES (1U << 3)

#define CPUID_6_EAX_ARAT (1U << 2)

/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)

#define CPUID_VENDOR_SZ 12

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
#define CPUID_VENDOR_INTEL "GenuineIntel"

#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
#define CPUID_VENDOR_AMD "AuthenticAMD"

#define CPUID_VENDOR_VIA "CentaurHauls"

#define CPUID_VENDOR_HYGON "HygonGenuine"

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)

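/*
 * Illustrative note (editor's addition): CPUID leaf 0 returns the vendor
 * string as three little-endian dwords in EBX, EDX, ECX, so "GenuineIntel"
 * packs as:
 *
 *     EBX = 'G' | 'e'<<8 | 'n'<<16 | 'u'<<24 = 0x756e6547   ("Genu")
 *     EDX = 0x49656e69                                      ("ineI")
 *     ECX = 0x6c65746e                                      ("ntel")
 *
 * which is why IS_INTEL_CPU() and IS_AMD_CPU() compare three dwords
 * instead of the string.
 */
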
#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */

/* CPUID[0xB].ECX level types */
#define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8)
#define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8)
#define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8)
#define CPUID_TOPOLOGY_LEVEL_DIE (5U << 8)

/* MSR Feature Bits */
#define MSR_ARCH_CAP_RDCL_NO (1U << 0)
#define MSR_ARCH_CAP_IBRS_ALL (1U << 1)
#define MSR_ARCH_CAP_RSBA (1U << 2)
#define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
#define MSR_ARCH_CAP_SSB_NO (1U << 4)
#define MSR_ARCH_CAP_MDS_NO (1U << 5)
#define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6)
#define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7)
#define MSR_ARCH_CAP_TAA_NO (1U << 8)

#define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)

/* VMX MSR features */
#define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull
#define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32)
#define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32)
#define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49)
#define MSR_VMX_BASIC_INS_OUTS (1ULL << 54)
#define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55)

#define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full
#define MSR_VMX_MISC_STORE_LMA (1ULL << 5)
#define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6)
#define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7)
#define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8)
#define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull
#define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29)
#define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30)

#define MSR_VMX_EPT_EXECONLY (1ULL << 0)
#define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6)
#define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7)
#define MSR_VMX_EPT_UC (1ULL << 8)
#define MSR_VMX_EPT_WB (1ULL << 14)
#define MSR_VMX_EPT_2MB (1ULL << 16)
#define MSR_VMX_EPT_1GB (1ULL << 17)
#define MSR_VMX_EPT_INVEPT (1ULL << 20)
#define MSR_VMX_EPT_AD_BITS (1ULL << 21)
#define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22)
#define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25)
#define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26)
#define MSR_VMX_EPT_INVVPID (1ULL << 32)
#define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40)
#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41)
#define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42)
#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43)

#define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0)


/* VMX controls */
#define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
#define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008
#define VMX_CPU_BASED_HLT_EXITING 0x00000080
#define VMX_CPU_BASED_INVLPG_EXITING 0x00000200
#define VMX_CPU_BASED_MWAIT_EXITING 0x00000400
#define VMX_CPU_BASED_RDPMC_EXITING 0x00000800
#define VMX_CPU_BASED_RDTSC_EXITING 0x00001000
#define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000
#define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000
#define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000
#define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000
#define VMX_CPU_BASED_TPR_SHADOW 0x00200000
#define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
#define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000
#define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000
#define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000
#define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000
#define VMX_CPU_BASED_MONITOR_EXITING 0x20000000
#define VMX_CPU_BASED_PAUSE_EXITING 0x40000000
#define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000

#define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002
#define VMX_SECONDARY_EXEC_DESC 0x00000004
#define VMX_SECONDARY_EXEC_RDTSCP 0x00000008
#define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
#define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
#define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
#define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
#define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
#define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800
#define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
#define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000
#define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000
#define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000
#define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000
#define VMX_SECONDARY_EXEC_XSAVES 0x00100000

#define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001
#define VMX_PIN_BASED_NMI_EXITING 0x00000008
#define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020
#define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
#define VMX_PIN_BASED_POSTED_INTR 0x00000080

#define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
#define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
#define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
#define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
#define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000
#define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000
#define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000
#define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000
#define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
#define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000
#define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000
#define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000

#define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
#define VMX_VM_ENTRY_IA32E_MODE 0x00000200
#define VMX_VM_ENTRY_SMM 0x00000400
#define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
#define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
#define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000
#define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000
#define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000
#define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000
#define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000

/* Supported Hyper-V Enlightenments */
#define HYPERV_FEAT_RELAXED 0
#define HYPERV_FEAT_VAPIC 1
#define HYPERV_FEAT_TIME 2
#define HYPERV_FEAT_CRASH 3
#define HYPERV_FEAT_RESET 4
#define HYPERV_FEAT_VPINDEX 5
#define HYPERV_FEAT_RUNTIME 6
#define HYPERV_FEAT_SYNIC 7
#define HYPERV_FEAT_STIMER 8
#define HYPERV_FEAT_FREQUENCIES 9
#define HYPERV_FEAT_REENLIGHTENMENT 10
#define HYPERV_FEAT_TLBFLUSH 11
#define HYPERV_FEAT_EVMCS 12
#define HYPERV_FEAT_IPI 13
#define HYPERV_FEAT_STIMER_DIRECT 14

#ifndef HYPERV_SPINLOCK_NEVER_RETRY
#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
#endif

#define EXCP00_DIVZ 0
#define EXCP01_DB 1
#define EXCP02_NMI 2
#define EXCP03_INT3 3
#define EXCP04_INTO 4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX 7
#define EXCP08_DBLE 8
#define EXCP09_XERR 9
#define EXCP0A_TSS 10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF 13
#define EXCP0E_PAGE 14
#define EXCP10_COPR 16
#define EXCP11_ALGN 17
#define EXCP12_MCHK 18

#define EXCP_SYSCALL 0x100 /* only happens in user-only emulation
                              for the syscall instruction */
#define EXCP_VMEXIT 0x100

/* i386-specific interrupt pending bits. */
#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2

/* Use a clearer name for this. */
#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET

/* Instead of computing the condition codes after each x86 instruction,
 * QEMU just stores one operand (called CC_SRC), the result
 * (called CC_DST) and the type of operation (called CC_OP). When the
 * condition codes are needed, the condition codes can be calculated
 * using this information. Condition codes are not generated if they
 * are only needed for conditional branches.
 */
typedef enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except CF, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except CF, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
    CC_OP_BMILGW,
    CC_OP_BMILGL,
    CC_OP_BMILGQ,

    CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
    CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */
    CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */

    CC_OP_CLR, /* Z set, all other flags clear. */
    CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */

    CC_OP_NB,
} CCOp;

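/*
 * Illustrative sketch (editor's addition): with lazy flags, a predicate
 * like ZF is derived from the stored operands on demand; for the
 * CC_OP_LOGIC* ops the result alone decides it.  Hypothetical helper:
 */
static inline int cc_logic_zf_example(target_ulong cc_dst)
{
    /* CC_OP_LOGIC*: CC_DST holds the result, so ZF is set iff it is zero */
    return cc_dst == 0 ? CC_Z : 0;
}
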
typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

#define MMREG_UNION(n, bits) \
    union n { \
        uint8_t _b_##n[(bits)/8]; \
        uint16_t _w_##n[(bits)/16]; \
        uint32_t _l_##n[(bits)/32]; \
        uint64_t _q_##n[(bits)/64]; \
        float32 _s_##n[(bits)/32]; \
        float64 _d_##n[(bits)/64]; \
    }

typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
} XMMReg;

typedef union {
    uint8_t _b[32];
    uint16_t _w[16];
    uint32_t _l[8];
    uint64_t _q[4];
} YMMReg;

typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
typedef MMREG_UNION(MMXReg, 64) MMXReg;

typedef struct BNDReg {
    uint64_t lb;
    uint64_t ub;
} BNDReg;

typedef struct BNDCSReg {
    uint64_t cfgu;
    uint64_t sts;
} BNDCSReg;

#define BNDCFG_ENABLE 1ULL
#define BNDCFG_BNDPRESERVE 2ULL
#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK

#ifdef HOST_WORDS_BIGENDIAN
#define ZMM_B(n) _b_ZMMReg[63 - (n)]
#define ZMM_W(n) _w_ZMMReg[31 - (n)]
#define ZMM_L(n) _l_ZMMReg[15 - (n)]
#define ZMM_S(n) _s_ZMMReg[15 - (n)]
#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
#define ZMM_D(n) _d_ZMMReg[7 - (n)]

#define MMX_B(n) _b_MMXReg[7 - (n)]
#define MMX_W(n) _w_MMXReg[3 - (n)]
#define MMX_L(n) _l_MMXReg[1 - (n)]
#define MMX_S(n) _s_MMXReg[1 - (n)]
#else
#define ZMM_B(n) _b_ZMMReg[n]
#define ZMM_W(n) _w_ZMMReg[n]
#define ZMM_L(n) _l_ZMMReg[n]
#define ZMM_S(n) _s_ZMMReg[n]
#define ZMM_Q(n) _q_ZMMReg[n]
#define ZMM_D(n) _d_ZMMReg[n]

#define MMX_B(n) _b_MMXReg[n]
#define MMX_W(n) _w_MMXReg[n]
#define MMX_L(n) _l_MMXReg[n]
#define MMX_S(n) _s_MMXReg[n]
#endif
#define MMX_Q(n) _q_MMXReg[n]

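/*
 * Illustrative note (editor's addition): the reversed indices in the
 * HOST_WORDS_BIGENDIAN variants keep element numbering guest-order on any
 * host, so e.g. env->xmm_regs[0].ZMM_L(0) is always the least significant
 * 32 bits of guest XMM0 regardless of host byte order.
 */
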
typedef union {
    floatx80 d __attribute__((aligned(16)));
    MMXReg mmx;
} FPReg;

typedef struct {
    uint64_t base;
    uint64_t mask;
} MTRRVar;

#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
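/* Editor's note: 0x198 - 0x186 = 0x12, i.e. room for 18 GP counter MSRs */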

#define TARGET_INSN_START_EXTRA_WORDS 1

#define NB_OPMASK_REGS 8

/* CPU can't have 0xFFFFFFFF APIC ID, so we use that value to
 * indicate that the APIC ID hasn't been set yet
 */
#define UNASSIGNED_APIC_ID 0xFFFFFFFF

typedef union X86LegacyXSaveArea {
    struct {
        uint16_t fcw;
        uint16_t fsw;
        uint8_t ftw;
        uint8_t reserved;
        uint16_t fpop;
        uint64_t fpip;
        uint64_t fpdp;
        uint32_t mxcsr;
        uint32_t mxcsr_mask;
        FPReg fpregs[8];
        uint8_t xmm_regs[16][16];
    };
    uint8_t data[512];
} X86LegacyXSaveArea;

typedef struct X86XSaveHeader {
    uint64_t xstate_bv;
    uint64_t xcomp_bv;
    uint64_t reserve0;
    uint8_t reserved[40];
} X86XSaveHeader;

/* Ext. save area 2: AVX State */
typedef struct XSaveAVX {
    uint8_t ymmh[16][16];
} XSaveAVX;

/* Ext. save area 3: BNDREG */
typedef struct XSaveBNDREG {
    BNDReg bnd_regs[4];
} XSaveBNDREG;

/* Ext. save area 4: BNDCSR */
typedef union XSaveBNDCSR {
    BNDCSReg bndcsr;
    uint8_t data[64];
} XSaveBNDCSR;

/* Ext. save area 5: Opmask */
typedef struct XSaveOpmask {
    uint64_t opmask_regs[NB_OPMASK_REGS];
} XSaveOpmask;

/* Ext. save area 6: ZMM_Hi256 */
typedef struct XSaveZMM_Hi256 {
    uint8_t zmm_hi256[16][32];
} XSaveZMM_Hi256;

/* Ext. save area 7: Hi16_ZMM */
typedef struct XSaveHi16_ZMM {
    uint8_t hi16_zmm[16][64];
} XSaveHi16_ZMM;

/* Ext. save area 9: PKRU state */
typedef struct XSavePKRU {
    uint32_t pkru;
    uint32_t padding;
} XSavePKRU;

typedef struct X86XSaveArea {
    X86LegacyXSaveArea legacy;
    X86XSaveHeader header;

    /* Extended save areas: */

    /* AVX State: */
    XSaveAVX avx_state;
    uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
    /* MPX State: */
    XSaveBNDREG bndreg_state;
    XSaveBNDCSR bndcsr_state;
    /* AVX-512 State: */
    XSaveOpmask opmask_state;
    XSaveZMM_Hi256 zmm_hi256_state;
    XSaveHi16_ZMM hi16_zmm_state;
    /* PKRU State: */
    XSavePKRU pkru_state;
} X86XSaveArea;

QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);

typedef enum TPRAccess {
    TPR_ACCESS_READ,
    TPR_ACCESS_WRITE,
} TPRAccess;

/* Cache information data structures: */

enum CacheType {
    DATA_CACHE,
    INSTRUCTION_CACHE,
    UNIFIED_CACHE
};

typedef struct CPUCacheInfo {
    enum CacheType type;
    uint8_t level;
    /* Size in bytes */
    uint32_t size;
    /* Line size, in bytes */
    uint16_t line_size;
    /*
     * Associativity.
     * Note: representation of fully-associative caches is not implemented
     */
    uint8_t associativity;
    /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */
    uint8_t partitions;
    /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */
    uint32_t sets;
    /*
     * Lines per tag.
     * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
     * (Is this synonymous with @partitions?)
     */
    uint8_t lines_per_tag;

    /* Self-initializing cache */
    bool self_init;
    /*
     * WBINVD/INVD is not guaranteed to act upon lower level caches of
     * non-originating threads sharing this cache.
     * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0]
     */
    bool no_invd_sharing;
    /*
     * Cache is inclusive of lower cache levels.
     * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1].
     */
    bool inclusive;
    /*
     * A complex function is used to index the cache, potentially using all
     * address bits. CPUID[4].EDX[bit 2].
     */
    bool complex_indexing;
} CPUCacheInfo;


typedef struct CPUCaches {
    CPUCacheInfo *l1d_cache;
    CPUCacheInfo *l1i_cache;
    CPUCacheInfo *l2_cache;
    CPUCacheInfo *l3_cache;
} CPUCaches;

typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_dst;
    target_ulong cc_src;
    target_ulong cc_src2;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    int32_t a20_mask;

    BNDReg bnd_regs[4];
    BNDCSReg bndcs_regs;
    uint64_t msr_bndcfgs;
    uint64_t efer;

    /* Beginning of state preserved by INIT (dummy marker). */
    struct {} start_init_save;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    uint16_t fpus;
    uint16_t fpuc;
    uint8_t fptags[8]; /* 0 = valid, 1 = empty */
    FPReg fpregs[8];
    /* KVM-only so far */
    uint16_t fpop;
    uint64_t fpip;
    uint64_t fpdp;

    /* emulator internal variables */
    float_status fp_status;
    floatx80 ft0;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
    ZMMReg xmm_t0;
    MMXReg mmx_t0;

    XMMReg ymmh_regs[CPU_NB_REGS];

    uint64_t opmask_regs[NB_OPMASK_REGS];
    YMMReg zmmh_regs[CPU_NB_REGS];
    ZMMReg hi16_zmm_regs[CPU_NB_REGS];

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t star;

    uint64_t vm_hsave;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    uint64_t tsc;
    uint64_t tsc_adjust;
    uint64_t tsc_deadline;
    uint64_t tsc_aux;

    uint64_t xcr0;

    uint64_t mcg_status;
    uint64_t msr_ia32_misc_enable;
    uint64_t msr_ia32_feature_control;

    uint64_t msr_fixed_ctr_ctrl;
    uint64_t msr_global_ctrl;
    uint64_t msr_global_status;
    uint64_t msr_global_ovf_ctrl;
    uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
    uint64_t msr_gp_counters[MAX_GP_COUNTERS];
    uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];

    uint64_t pat;
    uint32_t smbase;
    uint64_t msr_smi_count;

    uint32_t pkru;
    uint32_t tsx_ctrl;

    uint64_t spec_ctrl;
    uint64_t virt_ssbd;

    /* End of state preserved by INIT (dummy marker). */
    struct {} end_init_save;

    uint64_t system_time_msr;
    uint64_t wall_clock_msr;
    uint64_t steal_time_msr;
    uint64_t async_pf_en_msr;
    uint64_t pv_eoi_en_msr;
    uint64_t poll_control_msr;

    /* Partition-wide HV MSRs, will be updated only on the first vcpu */
    uint64_t msr_hv_hypercall;
    uint64_t msr_hv_guest_os_id;
    uint64_t msr_hv_tsc;

    /* Per-VCPU HV MSRs */
    uint64_t msr_hv_vapic;
    uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
    uint64_t msr_hv_runtime;
    uint64_t msr_hv_synic_control;
    uint64_t msr_hv_synic_evt_page;
    uint64_t msr_hv_synic_msg_page;
    uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
    uint64_t msr_hv_stimer_config[HV_STIMER_COUNT];
    uint64_t msr_hv_stimer_count[HV_STIMER_COUNT];
    uint64_t msr_hv_reenlightenment_control;
    uint64_t msr_hv_tsc_emulation_control;
    uint64_t msr_hv_tsc_emulation_status;

    uint64_t msr_rtit_ctrl;
    uint64_t msr_rtit_status;
    uint64_t msr_rtit_output_base;
    uint64_t msr_rtit_output_mask;
    uint64_t msr_rtit_cr3_match;
    uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
    union {
        struct CPUBreakpoint *cpu_breakpoint[4];
        struct CPUWatchpoint *cpu_watchpoint[4];
    }; /* break/watchpoints for dr[0..3] */
    int old_exception; /* exception in flight */

    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint64_t nested_cr3;
    uint32_t nested_pg_mode;
    uint8_t v_tpr;

    /* KVM states, automatically cleared on reset */
    uint8_t nmi_injected;
    uint8_t nmi_pending;

    uintptr_t retaddr;

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    /* Fields after this point are preserved across CPU reset. */

    /* processor features (e.g. for CPUID insn) */
    /* Actual cpuid leaf 7 value */
    uint32_t cpuid_level_func7;
    /* Minimum cpuid leaf 7 value */
    uint32_t cpuid_min_level_func7;
    /* Minimum level/xlevel/xlevel2, based on CPU model + features */
    uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
    /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
    uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
    /* Actual level/xlevel/xlevel2 value: */
    uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    FeatureWordArray features;
    /* Features that were explicitly enabled/disabled */
    FeatureWordArray user_features;
    uint32_t cpuid_model[12];
    /* Cache information for CPUID. When legacy-cache=on, the cache data
     * on each CPUID leaf will be different, because we keep compatibility
     * with old QEMU versions.
     */
    CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;

    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[MSR_MTRRcap_VCNT];

    /* For KVM */
    uint32_t mp_state;
    int32_t exception_nr;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t exception_pending;
    uint8_t exception_injected;
    uint8_t has_error_code;
    uint8_t exception_has_payload;
    uint64_t exception_payload;
    uint32_t ins_len;
    uint32_t sipi_vector;
    bool tsc_valid;
    int64_t tsc_khz;
    int64_t user_tsc_khz; /* for sanity check only */
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    void *xsave_buf;
#endif
#if defined(CONFIG_KVM)
    struct kvm_nested_state *nested_state;
#endif
#if defined(CONFIG_HVF)
    HVFX86EmulatorState *hvf_emul;
#endif

    uint64_t mcg_cap;
    uint64_t mcg_ctl;
    uint64_t mcg_ext_ctl;
    uint64_t mce_banks[MCE_BANKS_DEF*4];
    uint64_t xstate_bv;

    /* vmstate */
    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;

    uint64_t xss;
    uint32_t umwait;

    TPRAccess tpr_access_type;

    unsigned nr_dies;
} CPUX86State;
1610
1611 struct kvm_msrs;
1612
1613 /**
1614 * X86CPU:
1615 * @env: #CPUX86State
1616 * @migratable: If set, only migratable flags will be accepted when "enforce"
1617 * mode is used, and only migratable flags will be included in the "host"
1618 * CPU model.
1619 *
1620 * An x86 CPU.
1621 */
1622 struct X86CPU {
1623 /*< private >*/
1624 CPUState parent_obj;
1625 /*< public >*/
1626
1627 CPUNegativeOffsetState neg;
1628 CPUX86State env;
1629
1630 uint32_t hyperv_spinlock_attempts;
1631 char *hyperv_vendor_id;
1632 bool hyperv_synic_kvm_only;
1633 uint64_t hyperv_features;
1634 bool hyperv_passthrough;
1635 OnOffAuto hyperv_no_nonarch_cs;
1636
1637 bool check_cpuid;
1638 bool enforce_cpuid;
1639 /*
1640 * Force features to be enabled even if the host doesn't support them.
1641 * This is dangerous and should be done only for testing CPUID
1642 * compatibility.
1643 */
1644 bool force_features;
1645 bool expose_kvm;
1646 bool expose_tcg;
1647 bool migratable;
1648 bool migrate_smi_count;
1649 bool max_features; /* Enable all supported features automatically */
1650 uint32_t apic_id;
1651
1652 /* Enables publishing of TSC increment and Local APIC bus frequencies to
1653 * the guest OS in CPUID page 0x40000010, the same way that VMWare does. */
1654 bool vmware_cpuid_freq;
1655
1656 /* if true the CPUID code directly forward host cache leaves to the guest */
1657 bool cache_info_passthrough;
1658
1659 /* if set, the CPUID code directly forwards these host monitor/mwait
1660 * leaf values to the guest (see the illustrative sketch below) */
1661 struct {
1662 uint32_t eax;
1663 uint32_t ebx;
1664 uint32_t ecx;
1665 uint32_t edx;
1666 } mwait;
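/*
 * Illustrative sketch only (not part of this header): the mwait fields
 * above mirror CPUID leaf 5 (MONITOR/MWAIT). A hypothetical caller
 * could capture the host values like this, using host_cpuid() declared
 * further down in this header:
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     host_cpuid(5, 0, &eax, &ebx, &ecx, &edx);
 *     cpu->mwait.eax = eax;   // smallest monitor-line size
 *     cpu->mwait.ebx = ebx;   // largest monitor-line size
 *     cpu->mwait.ecx = ecx;   // MONITOR/MWAIT extension enumeration
 *     cpu->mwait.edx = edx;   // supported sub-C-states
 */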
1667
1668 /* Features that were filtered out because of missing host capabilities */
1669 FeatureWordArray filtered_features;
1670
1671 /* Enable PMU CPUID bits. This can't be enabled by default yet because
1672 * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
1673 * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
1674 * capabilities) directly to the guest.
1675 */
1676 bool enable_pmu;
1677
1678 /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
1679 * disabled by default to avoid breaking migration between QEMU with
1680 * different LMCE configurations.
1681 */
1682 bool enable_lmce;
1683
1684 /* Compatibility bits for old machine types.
1685 * If true, present a virtual L3 cache to the VM; the vCPUs in the same
1686 * virtual socket share a virtual L3 cache.
1687 */
1688 bool enable_l3_cache;
1689
1690 /* Compatibility bits for old machine types.
1691 * If true, present the old cache topology information.
1692 */
1693 bool legacy_cache;
1694
1695 /* Compatibility bits for old machine types: */
1696 bool enable_cpuid_0xb;
1697
1698 /* Enable auto level-increase for all CPUID leaves */
1699 bool full_cpuid_auto_level;
1700
1701 /* Enable auto level-increase for the Intel Processor Trace leaf */
1702 bool intel_pt_auto_level;
1703
1704 /* if true, fill the top bits of the MTRR_PHYSMASKn variable-range masks */
1705 bool fill_mtrr_mask;
1706
1707 /* if true override the phys_bits value with a value read from the host */
1708 bool host_phys_bits;
1709
1710 /* if set, limit maximum value for phys_bits when host_phys_bits is true */
1711 uint8_t host_phys_bits_limit;
1712
1713 /* Stop SMI delivery for migration compatibility with old machines */
1714 bool kvm_no_smi_migration;
1715
1716 /* Number of physical address bits supported */
1717 uint32_t phys_bits;
1718
1719 /* In order to simplify APIC support, we leave this pointer for the
1720 user (board code) to set */
1721 struct DeviceState *apic_state;
1722 struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
1723 Notifier machine_done;
1724
1725 struct kvm_msrs *kvm_msr_buf;
1726
1727 int32_t node_id; /* NUMA node this CPU belongs to */
1728 int32_t socket_id;
1729 int32_t die_id;
1730 int32_t core_id;
1731 int32_t thread_id;
1732
1733 int32_t hv_max_vps;
1734 };
1735
1736
1737 #ifndef CONFIG_USER_ONLY
1738 extern VMStateDescription vmstate_x86_cpu;
1739 #endif
1740
1741 /**
1742 * x86_cpu_do_interrupt:
1743 * @cpu: vCPU the interrupt is to be handled by.
1744 */
1745 void x86_cpu_do_interrupt(CPUState *cpu);
1746 bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
1747 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
1748
1749 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
1750 int cpuid, void *opaque);
1751 int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
1752 int cpuid, void *opaque);
1753 int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
1754 void *opaque);
1755 int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
1756 void *opaque);
1757
1758 void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
1759 Error **errp);
1760
1761 void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
1762
1763 hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
1764 MemTxAttrs *attrs);
1765
1766 int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
1767 int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
1768
1769 void x86_cpu_exec_enter(CPUState *cpu);
1770 void x86_cpu_exec_exit(CPUState *cpu);
1771
1772 void x86_cpu_list(void);
1773 int cpu_x86_support_mca_broadcast(CPUX86State *env);
1774
1775 int cpu_get_pic_interrupt(CPUX86State *s);
1776 /* MSDOS compatibility mode FPU exception support */
1777 void x86_register_ferr_irq(qemu_irq irq);
1778 void cpu_set_ignne(void);
1779 /* mpx_helper.c */
1780 void cpu_sync_bndcs_hflags(CPUX86State *env);
1781
1782 /* This function must always be used to load data into the segment
1783 cache: it synchronizes the hflags with the segment cache values */
1784 static inline void cpu_x86_load_seg_cache(CPUX86State *env,
1785 int seg_reg, unsigned int selector,
1786 target_ulong base,
1787 unsigned int limit,
1788 unsigned int flags)
1789 {
1790 SegmentCache *sc;
1791 unsigned int new_hflags;
1792
1793 sc = &env->segs[seg_reg];
1794 sc->selector = selector;
1795 sc->base = base;
1796 sc->limit = limit;
1797 sc->flags = flags;
1798
1799 /* update the hidden flags */
1800 {
1801 if (seg_reg == R_CS) {
1802 #ifdef TARGET_X86_64
1803 if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
1804 /* long mode */
1805 env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
1806 env->hflags &= ~(HF_ADDSEG_MASK);
1807 } else
1808 #endif
1809 {
1810 /* legacy / compatibility case */
1811 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
1812 >> (DESC_B_SHIFT - HF_CS32_SHIFT);
1813 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
1814 new_hflags;
1815 }
1816 }
1817 if (seg_reg == R_SS) {
1818 int cpl = (flags >> DESC_DPL_SHIFT) & 3;
1819 #if HF_CPL_MASK != 3
1820 #error HF_CPL_MASK is hardcoded
1821 #endif
1822 env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
1823 /* Possibly switch between BNDCFGS and BNDCFGU */
1824 cpu_sync_bndcs_hflags(env);
1825 }
1826 new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
1827 >> (DESC_B_SHIFT - HF_SS32_SHIFT);
1828 if (env->hflags & HF_CS64_MASK) {
1829 /* zero base assumed for DS, ES and SS in long mode */
1830 } else if (!(env->cr[0] & CR0_PE_MASK) ||
1831 (env->eflags & VM_MASK) ||
1832 !(env->hflags & HF_CS32_MASK)) {
1833 /* XXX: try to avoid this test. The problem comes from the
1834 fact that in real mode or vm86 mode we only modify the
1835 'base' and 'selector' fields of the segment cache to go
1836 faster. A solution may be to force addseg to one in
1837 translate-i386.c. */
1838 new_hflags |= HF_ADDSEG_MASK;
1839 } else {
1840 new_hflags |= ((env->segs[R_DS].base |
1841 env->segs[R_ES].base |
1842 env->segs[R_SS].base) != 0) <<
1843 HF_ADDSEG_SHIFT;
1844 }
1845 env->hflags = (env->hflags &
1846 ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
1847 }
1848 }
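/*
 * Illustrative sketch only: loading a flat 32-bit code segment through
 * the helper above, so HF_CS32_MASK gets derived from DESC_B_MASK. The
 * selector value 0x08 is a hypothetical GDT slot, not anything this
 * header defines.
 *
 *     cpu_x86_load_seg_cache(env, R_CS, 0x08, 0, 0xffffffff,
 *                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
 *                            DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK |
 *                            DESC_A_MASK);
 */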
1849
1850 static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
1851 uint8_t sipi_vector)
1852 {
1853 CPUState *cs = CPU(cpu);
1854 CPUX86State *env = &cpu->env;
1855
1856 env->eip = 0;
1857 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
1858 sipi_vector << 12,
1859 env->segs[R_CS].limit,
1860 env->segs[R_CS].flags);
1861 cs->halted = 0;
1862 }
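/*
 * Worked example (illustrative): a SIPI with vector 0x9a loads
 * CS.selector = 0x9a00 and CS.base = 0x9a000 with EIP = 0, so the AP
 * starts fetching at physical address 0x9a000, which is consistent
 * with real-mode segmentation, where base = selector << 4.
 */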
1863
1864 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1865 target_ulong *base, unsigned int *limit,
1866 unsigned int *flags);
1867
1868 /* op_helper.c */
1869 /* used for debug or cpu save/restore */
1870
1871 /* cpu-exec.c */
1872 /* the following helpers are only usable in user mode simulation as
1873 they can trigger unexpected exceptions */
1874 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
1875 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
1876 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
1877 void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr);
1878 void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr);
1879
1880 /* You can call this signal handler from your SIGBUS and SIGSEGV
1881 signal handlers to inform the virtual CPU of exceptions. Non-zero
1882 is returned if the signal was handled by the virtual CPU. */
1883 int cpu_x86_signal_handler(int host_signum, void *pinfo,
1884 void *puc);
1885
1886 /* cpu.c */
1887 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1888 uint32_t *eax, uint32_t *ebx,
1889 uint32_t *ecx, uint32_t *edx);
1890 void cpu_clear_apic_feature(CPUX86State *env);
1891 void host_cpuid(uint32_t function, uint32_t count,
1892 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
1893 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
1894
1895 /* helper.c */
1896 bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1897 MMUAccessType access_type, int mmu_idx,
1898 bool probe, uintptr_t retaddr);
1899 void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
1900
1901 #ifndef CONFIG_USER_ONLY
1902 static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
1903 {
1904 return !!attrs.secure;
1905 }
1906
1907 static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
1908 {
1909 return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
1910 }
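/*
 * Illustrative sketch: cpu_get_mem_attrs() further down in this header
 * sets attrs.secure while in SMM, so x86_asidx_from_attrs() picks
 * address space 1 (the SMM view of memory) in SMM and address space 0
 * otherwise:
 *
 *     MemTxAttrs attrs = cpu_get_mem_attrs(env);
 *     int asidx = x86_asidx_from_attrs(cs, attrs);  // 1 in SMM, else 0
 */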
1911
1912 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
1913 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
1914 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
1915 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
1916 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
1917 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
1918 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
1919 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
1920 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
1921 #endif
1922
1923 void breakpoint_handler(CPUState *cs);
1924
1925 /* these functions will eventually be removed */
1926 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
1927 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
1928 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
1929 void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
1930
1931 /* hw/pc.c */
1932 uint64_t cpu_get_tsc(CPUX86State *env);
1933
1934 /* XXX: This value should match the one returned by CPUID
1935 * and the one used in exec.c */
1936 # if defined(TARGET_X86_64)
1937 # define TCG_PHYS_ADDR_BITS 40
1938 # else
1939 # define TCG_PHYS_ADDR_BITS 36
1940 # endif
1941
1942 #define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS)
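/*
 * For reference: with TCG_PHYS_ADDR_BITS = 40 the mask expands to
 * 0xffffffffffULL (forty one-bits); with 36 bits it is 0xfffffffffULL.
 */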
1943
1944 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
1945 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
1946 #define CPU_RESOLVING_TYPE TYPE_X86_CPU
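/*
 * For reference: X86_CPU_TYPE_NAME("qemu64") string-concatenates to
 * "qemu64-" TYPE_X86_CPU, the QOM type name under which the CPU model
 * is registered and looked up.
 */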
1947
1948 #ifdef TARGET_X86_64
1949 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64")
1950 #else
1951 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
1952 #endif
1953
1954 #define cpu_signal_handler cpu_x86_signal_handler
1955 #define cpu_list x86_cpu_list
1956
1957 /* MMU modes definitions */
1958 #define MMU_MODE0_SUFFIX _ksmap
1959 #define MMU_MODE1_SUFFIX _user
1960 #define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
1961 #define MMU_KSMAP_IDX 0
1962 #define MMU_USER_IDX 1
1963 #define MMU_KNOSMAP_IDX 2
1964 static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
1965 {
1966 return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
1967 (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
1968 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
1969 }
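/*
 * Worked example (illustrative): at CPL 3 the index is always
 * MMU_USER_IDX. At CPL < 3 with SMAP enabled and EFLAGS.AC clear the
 * result is MMU_KSMAP_IDX (SMAP checks enforced); SMAP disabled or AC
 * set yields MMU_KNOSMAP_IDX.
 */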
1970
1971 static inline int cpu_mmu_index_kernel(CPUX86State *env)
1972 {
1973 return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
1974 ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
1975 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
1976 }
1977
1978 #define CC_DST (env->cc_dst)
1979 #define CC_SRC (env->cc_src)
1980 #define CC_SRC2 (env->cc_src2)
1981 #define CC_OP (env->cc_op)
1982
1983 /* n must be a constant to be efficient */
1984 static inline target_long lshift(target_long x, int n)
1985 {
1986 if (n >= 0) {
1987 return x << n;
1988 } else {
1989 return x >> (-n);
1990 }
1991 }
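/*
 * Illustrative: lshift(x, 3) == x << 3 while lshift(x, -3) == x >> 3;
 * the sign of n selects the shift direction so callers can fold both
 * cases into a single expression.
 */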
1992
1993 /* float macros */
1994 #define FT0 (env->ft0)
1995 #define ST0 (env->fpregs[env->fpstt].d)
1996 #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
1997 #define ST1 ST(1)
1998
1999 /* translate.c */
2000 void tcg_x86_init(void);
2001
2002 typedef CPUX86State CPUArchState;
2003 typedef X86CPU ArchCPU;
2004
2005 #include "exec/cpu-all.h"
2006 #include "svm.h"
2007
2008 #if !defined(CONFIG_USER_ONLY)
2009 #include "hw/i386/apic.h"
2010 #endif
2011
2012 static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
2013 target_ulong *cs_base, uint32_t *flags)
2014 {
2015 *cs_base = env->segs[R_CS].base;
2016 *pc = *cs_base + env->eip;
2017 *flags = env->hflags |
2018 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
2019 }
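/*
 * Illustrative: with a flat code segment (CS.base == 0) the returned pc
 * equals env->eip; with a non-zero base, e.g. 0x9a000 right after a
 * SIPI, pc = 0x9a000 + env->eip.
 */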
2020
2021 void do_cpu_init(X86CPU *cpu);
2022 void do_cpu_sipi(X86CPU *cpu);
2023
2024 #define MCE_INJECT_BROADCAST 1
2025 #define MCE_INJECT_UNCOND_AO 2
2026
2027 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
2028 uint64_t status, uint64_t mcg_status, uint64_t addr,
2029 uint64_t misc, int flags);
2030
2031 /* excp_helper.c */
2032 void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
2033 void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
2034 uintptr_t retaddr);
2035 void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
2036 int error_code);
2037 void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
2038 int error_code, uintptr_t retaddr);
2039 void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
2040 int error_code, int next_eip_addend);
2041
2042 /* cc_helper.c */
2043 extern const uint8_t parity_table[256];
2044 uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
2045
2046 static inline uint32_t cpu_compute_eflags(CPUX86State *env)
2047 {
2048 uint32_t eflags = env->eflags;
2049 if (tcg_enabled()) {
2050 eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
2051 }
2052 return eflags;
2053 }
2054
2055 /* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
2056 * after generating a call to a helper that uses this.
2057 */
2058 static inline void cpu_load_eflags(CPUX86State *env, int eflags,
2059 int update_mask)
2060 {
2061 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
2062 CC_OP = CC_OP_EFLAGS;
2063 env->df = 1 - (2 * ((eflags >> 10) & 1));
2064 env->eflags = (env->eflags & ~update_mask) |
2065 (eflags & update_mask) | 0x2;
2066 }
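/*
 * Worked example (illustrative): bit 10 of EFLAGS is DF. With bit 10
 * set, env->df = 1 - 2*1 = -1 (string instructions decrement); with it
 * clear, env->df = 1 - 2*0 = +1 (they increment). The trailing "| 0x2"
 * keeps the architecturally-reserved, always-one bit 1 set.
 */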
2067
2068 /* Load EFER and update the corresponding hflags. XXX: should we do
2069 consistency checks against the CPUID bits? */
2070 static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
2071 {
2072 env->efer = val;
2073 env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
2074 if (env->efer & MSR_EFER_LMA) {
2075 env->hflags |= HF_LMA_MASK;
2076 }
2077 if (env->efer & MSR_EFER_SVME) {
2078 env->hflags |= HF_SVME_MASK;
2079 }
2080 }
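/*
 * Illustrative sketch only, reusing the MSR_EFER_* bits referenced in
 * the helper above:
 *
 *     cpu_load_efer(env, env->efer | MSR_EFER_LMA);
 *     assert(env->hflags & HF_LMA_MASK);   // long-mode hflag now set
 */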
2081
2082 static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
2083 {
2084 return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
2085 }
2086
2087 static inline int32_t x86_get_a20_mask(CPUX86State *env)
2088 {
2089 if (env->hflags & HF_SMM_MASK) {
2090 return -1;
2091 } else {
2092 return env->a20_mask;
2093 }
2094 }
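/*
 * Worked example (illustrative): with A20 disabled the mask clears
 * bit 20, so address 0x100000 aliases to 0x0, mimicking 8086
 * wrap-around. In SMM the helper returns -1 (all bits set) and no
 * masking occurs.
 */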
2095
2096 static inline bool cpu_has_vmx(CPUX86State *env)
2097 {
2098 return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
2099 }
2100
2101 /*
2102 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
2103 * Once set, CR4.VMXE must remain set as long as the vCPU is in
2104 * VMX operation. This is because CR4.VMXE is one of the bits set
2105 * in MSR_IA32_VMX_CR4_FIXED1.
2106 *
2107 * There is one exception to the above statement: when a vCPU enters
2108 * SMM mode, it temporarily exits VMX operation and may also reset
2109 * CR4.VMXE during execution in SMM mode. When the vCPU exits SMM mode,
2110 * its state is restored to be in VMX operation and CR4.VMXE is restored
2111 * to its original (set) value.
2112 *
2113 * Therefore, when the vCPU is not in SMM mode, we can infer whether
2114 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
2115 * know for certain.
2116 */
2117 static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
2118 {
2119 return cpu_has_vmx(env) &&
2120 ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
2121 }
2122
2123 /* fpu_helper.c */
2124 void update_fp_status(CPUX86State *env);
2125 void update_mxcsr_status(CPUX86State *env);
2126
2127 static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
2128 {
2129 env->mxcsr = mxcsr;
2130 if (tcg_enabled()) {
2131 update_mxcsr_status(env);
2132 }
2133 }
2134
2135 static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
2136 {
2137 env->fpuc = fpuc;
2138 if (tcg_enabled()) {
2139 update_fp_status(env);
2140 }
2141 }
2142
2143 /* mem_helper.c */
2144 void helper_lock_init(void);
2145
2146 /* svm_helper.c */
2147 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
2148 uint64_t param, uintptr_t retaddr);
2149 void QEMU_NORETURN cpu_vmexit(CPUX86State *env, uint32_t exit_code,
2150 uint64_t exit_info_1, uintptr_t retaddr);
2151 void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1);
2152
2153 /* seg_helper.c */
2154 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
2155
2156 /* smm_helper.c */
2157 void do_smm_enter(X86CPU *cpu);
2158
2159 /* apic.c */
2160 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
2161 void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
2162 TPRAccess access);
2163
2164
2165 /* Change the value of a KVM-specific default
2166 *
2167 * If value is NULL, no default will be set and the original
2168 * value from the CPU model table will be kept.
2169 *
2170 * It is valid to call this function only for properties that
2171 * are already present in the kvm_default_props table.
2172 */
2173 void x86_cpu_change_kvm_default(const char *prop, const char *value);
2174
2175 /* Special values for X86CPUVersion: */
2176
2177 /* Resolve to latest CPU version */
2178 #define CPU_VERSION_LATEST -1
2179
2180 /*
2181 * Resolve to version defined by current machine type.
2182 * See x86_cpu_set_default_version()
2183 */
2184 #define CPU_VERSION_AUTO -2
2185
2186 /* Don't resolve to any versioned CPU models, like old QEMU versions */
2187 #define CPU_VERSION_LEGACY 0
2188
2189 typedef int X86CPUVersion;
2190
2191 /*
2192 * Set default CPU model version for CPU models having
2193 * version == CPU_VERSION_AUTO.
2194 */
2195 void x86_cpu_set_default_version(X86CPUVersion version);
2196
2197 /* Return name of 32-bit register, from a R_* constant */
2198 const char *get_register_name_32(unsigned int reg);
2199
2200 void enable_compat_apic_id_mode(void);
2201
2202 #define APIC_DEFAULT_ADDRESS 0xfee00000
2203 #define APIC_SPACE_SIZE 0x100000
2204
2205 void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);
2206
2207 /* cpu.c */
2208 bool cpu_is_bsp(X86CPU *cpu);
2209
2210 void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
2211 void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
2212 void x86_update_hflags(CPUX86State *env);
2213
2214 static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
2215 {
2216 return !!(cpu->hyperv_features & BIT(feat));
2217 }
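/*
 * Illustrative usage, assuming the HYPERV_FEAT_* bit numbers defined
 * earlier in this header:
 *
 *     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_RELAXED)) {
 *         // expose the relaxed-timing hint to the guest
 *     }
 */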
2218
2219 #endif /* I386_CPU_H */