1
2 /*
3 * i386 virtual CPU header
4 *
5 * Copyright (c) 2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #ifndef I386_CPU_H
22 #define I386_CPU_H
23
24 #include "qemu-common.h"
25 #include "cpu-qom.h"
26 #include "hyperv-proto.h"
27
28 #ifdef TARGET_X86_64
29 #define TARGET_LONG_BITS 64
30 #else
31 #define TARGET_LONG_BITS 32
32 #endif
33
34 #include "exec/cpu-defs.h"
35
36 /* The x86 has a strong memory model with some store-after-load re-ordering */
37 #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
38
39 /* Maximum instruction code size */
40 #define TARGET_MAX_INSN_SIZE 16
41
42 /* Support for self-modifying code even if the modified instruction is
43 close to the modifying instruction */
44 #define TARGET_HAS_PRECISE_SMC
45
46 #ifdef TARGET_X86_64
47 #define I386_ELF_MACHINE EM_X86_64
48 #define ELF_MACHINE_UNAME "x86_64"
49 #else
50 #define I386_ELF_MACHINE EM_386
51 #define ELF_MACHINE_UNAME "i686"
52 #endif
53
54 #define CPUArchState struct CPUX86State
55
56 enum {
57 R_EAX = 0,
58 R_ECX = 1,
59 R_EDX = 2,
60 R_EBX = 3,
61 R_ESP = 4,
62 R_EBP = 5,
63 R_ESI = 6,
64 R_EDI = 7,
65 R_R8 = 8,
66 R_R9 = 9,
67 R_R10 = 10,
68 R_R11 = 11,
69 R_R12 = 12,
70 R_R13 = 13,
71 R_R14 = 14,
72 R_R15 = 15,
73
74 R_AL = 0,
75 R_CL = 1,
76 R_DL = 2,
77 R_BL = 3,
78 R_AH = 4,
79 R_CH = 5,
80 R_DH = 6,
81 R_BH = 7,
82 };
83
84 typedef enum X86Seg {
85 R_ES = 0,
86 R_CS = 1,
87 R_SS = 2,
88 R_DS = 3,
89 R_FS = 4,
90 R_GS = 5,
91 R_LDTR = 6,
92 R_TR = 7,
93 } X86Seg;
94
95 /* segment descriptor fields */
96 #define DESC_G_SHIFT 23
97 #define DESC_G_MASK (1 << DESC_G_SHIFT)
98 #define DESC_B_SHIFT 22
99 #define DESC_B_MASK (1 << DESC_B_SHIFT)
100 #define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
101 #define DESC_L_MASK (1 << DESC_L_SHIFT)
102 #define DESC_AVL_SHIFT 20
103 #define DESC_AVL_MASK (1 << DESC_AVL_SHIFT)
104 #define DESC_P_SHIFT 15
105 #define DESC_P_MASK (1 << DESC_P_SHIFT)
106 #define DESC_DPL_SHIFT 13
107 #define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
108 #define DESC_S_SHIFT 12
109 #define DESC_S_MASK (1 << DESC_S_SHIFT)
110 #define DESC_TYPE_SHIFT 8
111 #define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
112 #define DESC_A_MASK (1 << 8)
113
114 #define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
115 #define DESC_C_MASK (1 << 10) /* code: conforming */
116 #define DESC_R_MASK (1 << 9) /* code: readable */
117
118 #define DESC_E_MASK (1 << 10) /* data: expansion direction */
119 #define DESC_W_MASK (1 << 9) /* data: writable */
120
121 #define DESC_TSS_BUSY_MASK (1 << 9)
122
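/*
 * Illustrative sketch (not part of the original header): decoding a
 * cached descriptor 'flags' word with the masks above; the DPL
 * extraction mirrors cpu_x86_load_seg_cache() further down.  'flags'
 * here is a hypothetical local variable.
 *
 *     unsigned dpl = (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
 *     bool page_granular = (flags & DESC_G_MASK) != 0;
 *     bool is_code = (flags & DESC_S_MASK) && (flags & DESC_CS_MASK);
 */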
123 /* eflags masks */
124 #define CC_C 0x0001
125 #define CC_P 0x0004
126 #define CC_A 0x0010
127 #define CC_Z 0x0040
128 #define CC_S 0x0080
129 #define CC_O 0x0800
130
131 #define TF_SHIFT 8
132 #define IOPL_SHIFT 12
133 #define VM_SHIFT 17
134
135 #define TF_MASK 0x00000100
136 #define IF_MASK 0x00000200
137 #define DF_MASK 0x00000400
138 #define IOPL_MASK 0x00003000
139 #define NT_MASK 0x00004000
140 #define RF_MASK 0x00010000
141 #define VM_MASK 0x00020000
142 #define AC_MASK 0x00040000
143 #define VIF_MASK 0x00080000
144 #define VIP_MASK 0x00100000
145 #define ID_MASK 0x00200000
146
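/*
 * Illustrative sketch: the shift/mask pairs above combine in the usual
 * way; e.g. reading the I/O privilege level out of a hypothetical
 * 'eflags' value:
 *
 *     unsigned iopl = (eflags & IOPL_MASK) >> IOPL_SHIFT;
 *     bool if_set = (eflags & IF_MASK) != 0;
 */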
147 /* hidden flags - used internally by QEMU to represent additional CPU
148 states. Only INHIBIT_IRQ, SMM and SVME are not redundant. We
149 avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
150 positions to ease ORing with eflags. */
151 /* current cpl */
152 #define HF_CPL_SHIFT 0
153 /* true if hardware interrupts must be disabled for next instruction */
154 #define HF_INHIBIT_IRQ_SHIFT 3
155 /* 16 or 32 bit segments */
156 #define HF_CS32_SHIFT 4
157 #define HF_SS32_SHIFT 5
158 /* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
159 #define HF_ADDSEG_SHIFT 6
160 /* copy of CR0.PE (protected mode) */
161 #define HF_PE_SHIFT 7
162 #define HF_TF_SHIFT 8 /* must be same as eflags */
163 #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */
164 #define HF_EM_SHIFT 10
165 #define HF_TS_SHIFT 11
166 #define HF_IOPL_SHIFT 12 /* must be same as eflags */
167 #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
168 #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
169 #define HF_RF_SHIFT 16 /* must be same as eflags */
170 #define HF_VM_SHIFT 17 /* must be same as eflags */
171 #define HF_AC_SHIFT 18 /* must be same as eflags */
172 #define HF_SMM_SHIFT 19 /* CPU in SMM mode */
173 #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
174 #define HF_GUEST_SHIFT 21 /* SVM intercepts are active */
175 #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
176 #define HF_SMAP_SHIFT 23 /* CR4.SMAP */
177 #define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
178 #define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
179 #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
180
181 #define HF_CPL_MASK (3 << HF_CPL_SHIFT)
182 #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
183 #define HF_CS32_MASK (1 << HF_CS32_SHIFT)
184 #define HF_SS32_MASK (1 << HF_SS32_SHIFT)
185 #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
186 #define HF_PE_MASK (1 << HF_PE_SHIFT)
187 #define HF_TF_MASK (1 << HF_TF_SHIFT)
188 #define HF_MP_MASK (1 << HF_MP_SHIFT)
189 #define HF_EM_MASK (1 << HF_EM_SHIFT)
190 #define HF_TS_MASK (1 << HF_TS_SHIFT)
191 #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
192 #define HF_LMA_MASK (1 << HF_LMA_SHIFT)
193 #define HF_CS64_MASK (1 << HF_CS64_SHIFT)
194 #define HF_RF_MASK (1 << HF_RF_SHIFT)
195 #define HF_VM_MASK (1 << HF_VM_SHIFT)
196 #define HF_AC_MASK (1 << HF_AC_SHIFT)
197 #define HF_SMM_MASK (1 << HF_SMM_SHIFT)
198 #define HF_SVME_MASK (1 << HF_SVME_SHIFT)
199 #define HF_GUEST_MASK (1 << HF_GUEST_SHIFT)
200 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
201 #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
202 #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
203 #define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
204 #define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
205
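/*
 * Illustrative sketch: HF_CPL_MASK occupies the low two bits, so the
 * current privilege level can be read directly, and a 64-bit code
 * segment is long mode plus a 64-bit CS:
 *
 *     int cpl = env->hflags & HF_CPL_MASK;
 *     bool code64 = (env->hflags & HF_LMA_MASK) &&
 *                   (env->hflags & HF_CS64_MASK);
 */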
206 /* hflags2 */
207
208 #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
209 #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
210 #define HF2_NMI_SHIFT 2 /* CPU serving NMI */
211 #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
212 #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
213 #define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
214 #define HF2_NPT_SHIFT 6 /* Nested Paging enabled */
215
216 #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
217 #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
218 #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
219 #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
220 #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
221 #define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
222 #define HF2_NPT_MASK (1 << HF2_NPT_SHIFT)
223
224 #define CR0_PE_SHIFT 0
225 #define CR0_MP_SHIFT 1
226
227 #define CR0_PE_MASK (1U << 0)
228 #define CR0_MP_MASK (1U << 1)
229 #define CR0_EM_MASK (1U << 2)
230 #define CR0_TS_MASK (1U << 3)
231 #define CR0_ET_MASK (1U << 4)
232 #define CR0_NE_MASK (1U << 5)
233 #define CR0_WP_MASK (1U << 16)
234 #define CR0_AM_MASK (1U << 18)
235 #define CR0_PG_MASK (1U << 31)
236
237 #define CR4_VME_MASK (1U << 0)
238 #define CR4_PVI_MASK (1U << 1)
239 #define CR4_TSD_MASK (1U << 2)
240 #define CR4_DE_MASK (1U << 3)
241 #define CR4_PSE_MASK (1U << 4)
242 #define CR4_PAE_MASK (1U << 5)
243 #define CR4_MCE_MASK (1U << 6)
244 #define CR4_PGE_MASK (1U << 7)
245 #define CR4_PCE_MASK (1U << 8)
246 #define CR4_OSFXSR_SHIFT 9
247 #define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
248 #define CR4_OSXMMEXCPT_MASK (1U << 10)
249 #define CR4_LA57_MASK (1U << 12)
250 #define CR4_VMXE_MASK (1U << 13)
251 #define CR4_SMXE_MASK (1U << 14)
252 #define CR4_FSGSBASE_MASK (1U << 16)
253 #define CR4_PCIDE_MASK (1U << 17)
254 #define CR4_OSXSAVE_MASK (1U << 18)
255 #define CR4_SMEP_MASK (1U << 20)
256 #define CR4_SMAP_MASK (1U << 21)
257 #define CR4_PKE_MASK (1U << 22)
258
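/*
 * Illustrative sketch: typical control register tests built from the
 * masks above (architecturally CR0.PG also requires CR0.PE):
 *
 *     bool paging = (env->cr[0] & CR0_PG_MASK) != 0;
 *     bool pae    = (env->cr[4] & CR4_PAE_MASK) != 0;
 */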
259 #define DR6_BD (1 << 13)
260 #define DR6_BS (1 << 14)
261 #define DR6_BT (1 << 15)
262 #define DR6_FIXED_1 0xffff0ff0
263
264 #define DR7_GD (1 << 13)
265 #define DR7_TYPE_SHIFT 16
266 #define DR7_LEN_SHIFT 18
267 #define DR7_FIXED_1 0x00000400
268 #define DR7_GLOBAL_BP_MASK 0xaa
269 #define DR7_LOCAL_BP_MASK 0x55
270 #define DR7_MAX_BP 4
271 #define DR7_TYPE_BP_INST 0x0
272 #define DR7_TYPE_DATA_WR 0x1
273 #define DR7_TYPE_IO_RW 0x2
274 #define DR7_TYPE_DATA_RW 0x3
275
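/*
 * Illustrative sketch: DR7 dedicates four control bits to each of the
 * DR7_MAX_BP breakpoints, so the type and length fields of a
 * hypothetical breakpoint 'n' sit 4*n bits above the base shifts:
 *
 *     int type = (dr7 >> (DR7_TYPE_SHIFT + n * 4)) & 3;
 *     int len  = (dr7 >> (DR7_LEN_SHIFT + n * 4)) & 3;
 */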
276 #define PG_PRESENT_BIT 0
277 #define PG_RW_BIT 1
278 #define PG_USER_BIT 2
279 #define PG_PWT_BIT 3
280 #define PG_PCD_BIT 4
281 #define PG_ACCESSED_BIT 5
282 #define PG_DIRTY_BIT 6
283 #define PG_PSE_BIT 7
284 #define PG_GLOBAL_BIT 8
285 #define PG_PSE_PAT_BIT 12
286 #define PG_PKRU_BIT 59
287 #define PG_NX_BIT 63
288
289 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
290 #define PG_RW_MASK (1 << PG_RW_BIT)
291 #define PG_USER_MASK (1 << PG_USER_BIT)
292 #define PG_PWT_MASK (1 << PG_PWT_BIT)
293 #define PG_PCD_MASK (1 << PG_PCD_BIT)
294 #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
295 #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
296 #define PG_PSE_MASK (1 << PG_PSE_BIT)
297 #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
298 #define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
299 #define PG_ADDRESS_MASK 0x000ffffffffff000LL
300 #define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
301 #define PG_HI_USER_MASK 0x7ff0000000000000LL
302 #define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
303 #define PG_NX_MASK (1ULL << PG_NX_BIT)
304
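/*
 * Illustrative sketch: a user-mode write through a PTE needs the
 * present, writable and user bits, while PG_NX_MASK only matters for
 * instruction fetches; 'pte' is a hypothetical uint64_t:
 *
 *     bool ok = (pte & PG_PRESENT_MASK) && (pte & PG_RW_MASK) &&
 *               (pte & PG_USER_MASK);
 *     bool no_exec = (pte & PG_NX_MASK) != 0;
 */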
305 #define PG_ERROR_W_BIT 1
306
307 #define PG_ERROR_P_MASK 0x01
308 #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
309 #define PG_ERROR_U_MASK 0x04
310 #define PG_ERROR_RSVD_MASK 0x08
311 #define PG_ERROR_I_D_MASK 0x10
312 #define PG_ERROR_PK_MASK 0x20
313
314 #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
315 #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
316 #define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */
317
318 #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
319 #define MCE_BANKS_DEF 10
320
321 #define MCG_CAP_BANKS_MASK 0xff
322
323 #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
324 #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
325 #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
326 #define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */
327
328 #define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */
329
330 #define MCI_STATUS_VAL (1ULL<<63) /* valid error */
331 #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
332 #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
333 #define MCI_STATUS_EN (1ULL<<60) /* error enabled */
334 #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
335 #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
336 #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
337 #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
338 #define MCI_STATUS_AR (1ULL<<55) /* Action required */
339
340 /* MISC register defines */
341 #define MCM_ADDR_SEGOFF 0 /* segment offset */
342 #define MCM_ADDR_LINEAR 1 /* linear address */
343 #define MCM_ADDR_PHYS 2 /* physical address */
344 #define MCM_ADDR_MEM 3 /* memory address */
345 #define MCM_ADDR_GENERIC 7 /* generic */
346
347 #define MSR_IA32_TSC 0x10
348 #define MSR_IA32_APICBASE 0x1b
349 #define MSR_IA32_APICBASE_BSP (1<<8)
350 #define MSR_IA32_APICBASE_ENABLE (1<<11)
351 #define MSR_IA32_APICBASE_EXTD (1 << 10)
352 #define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
353 #define MSR_IA32_FEATURE_CONTROL 0x0000003a
354 #define MSR_TSC_ADJUST 0x0000003b
355 #define MSR_IA32_SPEC_CTRL 0x48
356 #define MSR_VIRT_SSBD 0xc001011f
357 #define MSR_IA32_PRED_CMD 0x49
358 #define MSR_IA32_ARCH_CAPABILITIES 0x10a
359 #define MSR_IA32_TSCDEADLINE 0x6e0
360
361 #define FEATURE_CONTROL_LOCKED (1<<0)
362 #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
363 #define FEATURE_CONTROL_LMCE (1<<20)
364
365 #define MSR_P6_PERFCTR0 0xc1
366
367 #define MSR_IA32_SMBASE 0x9e
368 #define MSR_SMI_COUNT 0x34
369 #define MSR_MTRRcap 0xfe
370 #define MSR_MTRRcap_VCNT 8
371 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
372 #define MSR_MTRRcap_WC_SUPPORTED (1 << 10)
373
374 #define MSR_IA32_SYSENTER_CS 0x174
375 #define MSR_IA32_SYSENTER_ESP 0x175
376 #define MSR_IA32_SYSENTER_EIP 0x176
377
378 #define MSR_MCG_CAP 0x179
379 #define MSR_MCG_STATUS 0x17a
380 #define MSR_MCG_CTL 0x17b
381 #define MSR_MCG_EXT_CTL 0x4d0
382
383 #define MSR_P6_EVNTSEL0 0x186
384
385 #define MSR_IA32_PERF_STATUS 0x198
386
387 #define MSR_IA32_MISC_ENABLE 0x1a0
388 /* Indicates good rep/movs microcode on some processors: */
389 #define MSR_IA32_MISC_ENABLE_DEFAULT 1
390
391 #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
392 #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
393
394 #define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
395
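/*
 * Illustrative sketch: the variable-range MTRR MSRs come in base/mask
 * pairs starting at 0x200, so the macros above round-trip:
 *
 *     MSR_MTRRphysBase(2)      == 0x204
 *     MSR_MTRRphysMask(2)      == 0x205
 *     MSR_MTRRphysIndex(0x205) == 2
 */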
396 #define MSR_MTRRfix64K_00000 0x250
397 #define MSR_MTRRfix16K_80000 0x258
398 #define MSR_MTRRfix16K_A0000 0x259
399 #define MSR_MTRRfix4K_C0000 0x268
400 #define MSR_MTRRfix4K_C8000 0x269
401 #define MSR_MTRRfix4K_D0000 0x26a
402 #define MSR_MTRRfix4K_D8000 0x26b
403 #define MSR_MTRRfix4K_E0000 0x26c
404 #define MSR_MTRRfix4K_E8000 0x26d
405 #define MSR_MTRRfix4K_F0000 0x26e
406 #define MSR_MTRRfix4K_F8000 0x26f
407
408 #define MSR_PAT 0x277
409
410 #define MSR_MTRRdefType 0x2ff
411
412 #define MSR_CORE_PERF_FIXED_CTR0 0x309
413 #define MSR_CORE_PERF_FIXED_CTR1 0x30a
414 #define MSR_CORE_PERF_FIXED_CTR2 0x30b
415 #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
416 #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
417 #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
418 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
419
420 #define MSR_MC0_CTL 0x400
421 #define MSR_MC0_STATUS 0x401
422 #define MSR_MC0_ADDR 0x402
423 #define MSR_MC0_MISC 0x403
424
425 #define MSR_IA32_RTIT_OUTPUT_BASE 0x560
426 #define MSR_IA32_RTIT_OUTPUT_MASK 0x561
427 #define MSR_IA32_RTIT_CTL 0x570
428 #define MSR_IA32_RTIT_STATUS 0x571
429 #define MSR_IA32_RTIT_CR3_MATCH 0x572
430 #define MSR_IA32_RTIT_ADDR0_A 0x580
431 #define MSR_IA32_RTIT_ADDR0_B 0x581
432 #define MSR_IA32_RTIT_ADDR1_A 0x582
433 #define MSR_IA32_RTIT_ADDR1_B 0x583
434 #define MSR_IA32_RTIT_ADDR2_A 0x584
435 #define MSR_IA32_RTIT_ADDR2_B 0x585
436 #define MSR_IA32_RTIT_ADDR3_A 0x586
437 #define MSR_IA32_RTIT_ADDR3_B 0x587
438 #define MAX_RTIT_ADDRS 8
439
440 #define MSR_EFER 0xc0000080
441
442 #define MSR_EFER_SCE (1 << 0)
443 #define MSR_EFER_LME (1 << 8)
444 #define MSR_EFER_LMA (1 << 10)
445 #define MSR_EFER_NXE (1 << 11)
446 #define MSR_EFER_SVME (1 << 12)
447 #define MSR_EFER_FFXSR (1 << 14)
448
449 #define MSR_STAR 0xc0000081
450 #define MSR_LSTAR 0xc0000082
451 #define MSR_CSTAR 0xc0000083
452 #define MSR_FMASK 0xc0000084
453 #define MSR_FSBASE 0xc0000100
454 #define MSR_GSBASE 0xc0000101
455 #define MSR_KERNELGSBASE 0xc0000102
456 #define MSR_TSC_AUX 0xc0000103
457
458 #define MSR_VM_HSAVE_PA 0xc0010117
459
460 #define MSR_IA32_BNDCFGS 0x00000d90
461 #define MSR_IA32_XSS 0x00000da0
462
463 #define XSTATE_FP_BIT 0
464 #define XSTATE_SSE_BIT 1
465 #define XSTATE_YMM_BIT 2
466 #define XSTATE_BNDREGS_BIT 3
467 #define XSTATE_BNDCSR_BIT 4
468 #define XSTATE_OPMASK_BIT 5
469 #define XSTATE_ZMM_Hi256_BIT 6
470 #define XSTATE_Hi16_ZMM_BIT 7
471 #define XSTATE_PKRU_BIT 9
472
473 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
474 #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
475 #define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
476 #define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
477 #define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
478 #define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
479 #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
480 #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
481 #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
482
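/*
 * Illustrative sketch: XCR0 values are built by ORing the masks above;
 * architecturally the FP bit must always be set and YMM state depends
 * on SSE state, so enabling AVX looks like:
 *
 *     uint64_t xcr0 = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK;
 */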
483 /* CPUID feature words */
484 typedef enum FeatureWord {
485 FEAT_1_EDX, /* CPUID[1].EDX */
486 FEAT_1_ECX, /* CPUID[1].ECX */
487 FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
488 FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
489 FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
490 FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
491 FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
492 FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
493 FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
494 FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
495 FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
496 FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
497 FEAT_HYPERV_EAX, /* CPUID[4000_0003].EAX */
498 FEAT_HYPERV_EBX, /* CPUID[4000_0003].EBX */
499 FEAT_HYPERV_EDX, /* CPUID[4000_0003].EDX */
500 FEAT_HV_RECOMM_EAX, /* CPUID[4000_0004].EAX */
501 FEAT_HV_NESTED_EAX, /* CPUID[4000_000A].EAX */
502 FEAT_SVM, /* CPUID[8000_000A].EDX */
503 FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
504 FEAT_6_EAX, /* CPUID[6].EAX */
505 FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
506 FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
507 FEAT_ARCH_CAPABILITIES,
508 FEATURE_WORDS,
509 } FeatureWord;
510
511 typedef uint32_t FeatureWordArray[FEATURE_WORDS];
512
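/*
 * Illustrative sketch: each FeatureWord names one 32-bit register of
 * one CPUID leaf, so a feature test pairs the enum index with the
 * matching CPUID_* bit defines below:
 *
 *     FeatureWordArray features;
 *     bool has_syscall = features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL;
 */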
513 /* cpuid_features bits */
514 #define CPUID_FP87 (1U << 0)
515 #define CPUID_VME (1U << 1)
516 #define CPUID_DE (1U << 2)
517 #define CPUID_PSE (1U << 3)
518 #define CPUID_TSC (1U << 4)
519 #define CPUID_MSR (1U << 5)
520 #define CPUID_PAE (1U << 6)
521 #define CPUID_MCE (1U << 7)
522 #define CPUID_CX8 (1U << 8)
523 #define CPUID_APIC (1U << 9)
524 #define CPUID_SEP (1U << 11) /* sysenter/sysexit */
525 #define CPUID_MTRR (1U << 12)
526 #define CPUID_PGE (1U << 13)
527 #define CPUID_MCA (1U << 14)
528 #define CPUID_CMOV (1U << 15)
529 #define CPUID_PAT (1U << 16)
530 #define CPUID_PSE36 (1U << 17)
531 #define CPUID_PN (1U << 18)
532 #define CPUID_CLFLUSH (1U << 19)
533 #define CPUID_DTS (1U << 21)
534 #define CPUID_ACPI (1U << 22)
535 #define CPUID_MMX (1U << 23)
536 #define CPUID_FXSR (1U << 24)
537 #define CPUID_SSE (1U << 25)
538 #define CPUID_SSE2 (1U << 26)
539 #define CPUID_SS (1U << 27)
540 #define CPUID_HT (1U << 28)
541 #define CPUID_TM (1U << 29)
542 #define CPUID_IA64 (1U << 30)
543 #define CPUID_PBE (1U << 31)
544
545 #define CPUID_EXT_SSE3 (1U << 0)
546 #define CPUID_EXT_PCLMULQDQ (1U << 1)
547 #define CPUID_EXT_DTES64 (1U << 2)
548 #define CPUID_EXT_MONITOR (1U << 3)
549 #define CPUID_EXT_DSCPL (1U << 4)
550 #define CPUID_EXT_VMX (1U << 5)
551 #define CPUID_EXT_SMX (1U << 6)
552 #define CPUID_EXT_EST (1U << 7)
553 #define CPUID_EXT_TM2 (1U << 8)
554 #define CPUID_EXT_SSSE3 (1U << 9)
555 #define CPUID_EXT_CID (1U << 10)
556 #define CPUID_EXT_FMA (1U << 12)
557 #define CPUID_EXT_CX16 (1U << 13)
558 #define CPUID_EXT_XTPR (1U << 14)
559 #define CPUID_EXT_PDCM (1U << 15)
560 #define CPUID_EXT_PCID (1U << 17)
561 #define CPUID_EXT_DCA (1U << 18)
562 #define CPUID_EXT_SSE41 (1U << 19)
563 #define CPUID_EXT_SSE42 (1U << 20)
564 #define CPUID_EXT_X2APIC (1U << 21)
565 #define CPUID_EXT_MOVBE (1U << 22)
566 #define CPUID_EXT_POPCNT (1U << 23)
567 #define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
568 #define CPUID_EXT_AES (1U << 25)
569 #define CPUID_EXT_XSAVE (1U << 26)
570 #define CPUID_EXT_OSXSAVE (1U << 27)
571 #define CPUID_EXT_AVX (1U << 28)
572 #define CPUID_EXT_F16C (1U << 29)
573 #define CPUID_EXT_RDRAND (1U << 30)
574 #define CPUID_EXT_HYPERVISOR (1U << 31)
575
576 #define CPUID_EXT2_FPU (1U << 0)
577 #define CPUID_EXT2_VME (1U << 1)
578 #define CPUID_EXT2_DE (1U << 2)
579 #define CPUID_EXT2_PSE (1U << 3)
580 #define CPUID_EXT2_TSC (1U << 4)
581 #define CPUID_EXT2_MSR (1U << 5)
582 #define CPUID_EXT2_PAE (1U << 6)
583 #define CPUID_EXT2_MCE (1U << 7)
584 #define CPUID_EXT2_CX8 (1U << 8)
585 #define CPUID_EXT2_APIC (1U << 9)
586 #define CPUID_EXT2_SYSCALL (1U << 11)
587 #define CPUID_EXT2_MTRR (1U << 12)
588 #define CPUID_EXT2_PGE (1U << 13)
589 #define CPUID_EXT2_MCA (1U << 14)
590 #define CPUID_EXT2_CMOV (1U << 15)
591 #define CPUID_EXT2_PAT (1U << 16)
592 #define CPUID_EXT2_PSE36 (1U << 17)
593 #define CPUID_EXT2_MP (1U << 19)
594 #define CPUID_EXT2_NX (1U << 20)
595 #define CPUID_EXT2_MMXEXT (1U << 22)
596 #define CPUID_EXT2_MMX (1U << 23)
597 #define CPUID_EXT2_FXSR (1U << 24)
598 #define CPUID_EXT2_FFXSR (1U << 25)
599 #define CPUID_EXT2_PDPE1GB (1U << 26)
600 #define CPUID_EXT2_RDTSCP (1U << 27)
601 #define CPUID_EXT2_LM (1U << 29)
602 #define CPUID_EXT2_3DNOWEXT (1U << 30)
603 #define CPUID_EXT2_3DNOW (1U << 31)
604
605 /* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
606 #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
607 CPUID_EXT2_DE | CPUID_EXT2_PSE | \
608 CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
609 CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
610 CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
611 CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
612 CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
613 CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
614 CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
615
616 #define CPUID_EXT3_LAHF_LM (1U << 0)
617 #define CPUID_EXT3_CMP_LEG (1U << 1)
618 #define CPUID_EXT3_SVM (1U << 2)
619 #define CPUID_EXT3_EXTAPIC (1U << 3)
620 #define CPUID_EXT3_CR8LEG (1U << 4)
621 #define CPUID_EXT3_ABM (1U << 5)
622 #define CPUID_EXT3_SSE4A (1U << 6)
623 #define CPUID_EXT3_MISALIGNSSE (1U << 7)
624 #define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
625 #define CPUID_EXT3_OSVW (1U << 9)
626 #define CPUID_EXT3_IBS (1U << 10)
627 #define CPUID_EXT3_XOP (1U << 11)
628 #define CPUID_EXT3_SKINIT (1U << 12)
629 #define CPUID_EXT3_WDT (1U << 13)
630 #define CPUID_EXT3_LWP (1U << 15)
631 #define CPUID_EXT3_FMA4 (1U << 16)
632 #define CPUID_EXT3_TCE (1U << 17)
633 #define CPUID_EXT3_NODEID (1U << 19)
634 #define CPUID_EXT3_TBM (1U << 21)
635 #define CPUID_EXT3_TOPOEXT (1U << 22)
636 #define CPUID_EXT3_PERFCORE (1U << 23)
637 #define CPUID_EXT3_PERFNB (1U << 24)
638
639 #define CPUID_SVM_NPT (1U << 0)
640 #define CPUID_SVM_LBRV (1U << 1)
641 #define CPUID_SVM_SVMLOCK (1U << 2)
642 #define CPUID_SVM_NRIPSAVE (1U << 3)
643 #define CPUID_SVM_TSCSCALE (1U << 4)
644 #define CPUID_SVM_VMCBCLEAN (1U << 5)
645 #define CPUID_SVM_FLUSHASID (1U << 6)
646 #define CPUID_SVM_DECODEASSIST (1U << 7)
647 #define CPUID_SVM_PAUSEFILTER (1U << 10)
648 #define CPUID_SVM_PFTHRESHOLD (1U << 12)
649
650 #define CPUID_7_0_EBX_FSGSBASE (1U << 0)
651 #define CPUID_7_0_EBX_BMI1 (1U << 3)
652 #define CPUID_7_0_EBX_HLE (1U << 4)
653 #define CPUID_7_0_EBX_AVX2 (1U << 5)
654 #define CPUID_7_0_EBX_SMEP (1U << 7)
655 #define CPUID_7_0_EBX_BMI2 (1U << 8)
656 #define CPUID_7_0_EBX_ERMS (1U << 9)
657 #define CPUID_7_0_EBX_INVPCID (1U << 10)
658 #define CPUID_7_0_EBX_RTM (1U << 11)
659 #define CPUID_7_0_EBX_MPX (1U << 14)
660 #define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Foundation */
661 #define CPUID_7_0_EBX_AVX512DQ (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */
662 #define CPUID_7_0_EBX_RDSEED (1U << 18)
663 #define CPUID_7_0_EBX_ADX (1U << 19)
664 #define CPUID_7_0_EBX_SMAP (1U << 20)
665 #define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */
666 #define CPUID_7_0_EBX_PCOMMIT (1U << 22) /* Persistent Commit */
667 #define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
668 #define CPUID_7_0_EBX_CLWB (1U << 24) /* Cache Line Write Back */
669 #define CPUID_7_0_EBX_INTEL_PT (1U << 25) /* Intel Processor Trace */
670 #define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */
671 #define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
672 #define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
673 #define CPUID_7_0_EBX_SHA_NI (1U << 29) /* SHA1/SHA256 Instruction Extensions */
674 #define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Byte and Word Instructions */
675 #define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Length Extensions */
676
677 #define CPUID_7_0_ECX_AVX512BMI (1U << 1)
678 #define CPUID_7_0_ECX_VBMI (1U << 1) /* AVX-512 Vector Byte Manipulation Instrs */
679 #define CPUID_7_0_ECX_UMIP (1U << 2)
680 #define CPUID_7_0_ECX_PKU (1U << 3)
681 #define CPUID_7_0_ECX_OSPKE (1U << 4)
682 #define CPUID_7_0_ECX_VBMI2 (1U << 6) /* Additional VBMI Instrs */
683 #define CPUID_7_0_ECX_GFNI (1U << 8)
684 #define CPUID_7_0_ECX_VAES (1U << 9)
685 #define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
686 #define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
687 #define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
688 #define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) /* POPCNT for vectors of DW/QW */
689 #define CPUID_7_0_ECX_LA57 (1U << 16)
690 #define CPUID_7_0_ECX_RDPID (1U << 22)
691 #define CPUID_7_0_ECX_CLDEMOTE (1U << 25) /* CLDEMOTE Instruction */
692 #define CPUID_7_0_ECX_MOVDIRI (1U << 27) /* MOVDIRI Instruction */
693 #define CPUID_7_0_ECX_MOVDIR64B (1U << 28) /* MOVDIR64B Instruction */
694
695 #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Neural Network Instructions */
696 #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */
697 #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Speculation Control */
698 #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /* Arch Capabilities */
699 #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) /* Speculative Store Bypass Disable */
700
701 #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) /* Write back and
702 do not invalidate cache */
703 #define CPUID_8000_0008_EBX_IBPB (1U << 12) /* Indirect Branch Prediction Barrier */
704
705 #define CPUID_XSAVE_XSAVEOPT (1U << 0)
706 #define CPUID_XSAVE_XSAVEC (1U << 1)
707 #define CPUID_XSAVE_XGETBV1 (1U << 2)
708 #define CPUID_XSAVE_XSAVES (1U << 3)
709
710 #define CPUID_6_EAX_ARAT (1U << 2)
711
712 /* CPUID[0x80000007].EDX flags: */
713 #define CPUID_APM_INVTSC (1U << 8)
714
715 #define CPUID_VENDOR_SZ 12
716
717 #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
718 #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
719 #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
720 #define CPUID_VENDOR_INTEL "GenuineIntel"
721
722 #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
723 #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
724 #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
725 #define CPUID_VENDOR_AMD "AuthenticAMD"
726
727 #define CPUID_VENDOR_VIA "CentaurHauls"
728
729 #define CPUID_VENDOR_HYGON "HygonGenuine"
730
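/*
 * Illustrative sketch: CPUID leaf 0 returns the vendor string in
 * EBX/EDX/ECX order, which is how the three-word constants above are
 * meant to be compared against the cpuid_vendor1/2/3 fields declared
 * in CPUX86State below:
 *
 *     bool is_amd = env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
 *                   env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
 *                   env->cpuid_vendor3 == CPUID_VENDOR_AMD_3;
 */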
731 #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
732 #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
733
734 /* CPUID[0xB].ECX level types */
735 #define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8)
736 #define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8)
737 #define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8)
738
739 /* MSR Feature Bits */
740 #define MSR_ARCH_CAP_RDCL_NO (1U << 0)
741 #define MSR_ARCH_CAP_IBRS_ALL (1U << 1)
742 #define MSR_ARCH_CAP_RSBA (1U << 2)
743 #define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
744 #define MSR_ARCH_CAP_SSB_NO (1U << 4)
745
746 #ifndef HYPERV_SPINLOCK_NEVER_RETRY
747 #define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
748 #endif
749
750 #define EXCP00_DIVZ 0
751 #define EXCP01_DB 1
752 #define EXCP02_NMI 2
753 #define EXCP03_INT3 3
754 #define EXCP04_INTO 4
755 #define EXCP05_BOUND 5
756 #define EXCP06_ILLOP 6
757 #define EXCP07_PREX 7
758 #define EXCP08_DBLE 8
759 #define EXCP09_XERR 9
760 #define EXCP0A_TSS 10
761 #define EXCP0B_NOSEG 11
762 #define EXCP0C_STACK 12
763 #define EXCP0D_GPF 13
764 #define EXCP0E_PAGE 14
765 #define EXCP10_COPR 16
766 #define EXCP11_ALGN 17
767 #define EXCP12_MCHK 18
768
769 #define EXCP_SYSCALL 0x100 /* only happens in user-only emulation
770 for the syscall instruction */
771 #define EXCP_VMEXIT 0x100 /* system emulation only; may share EXCP_SYSCALL's value */
772
773 /* i386-specific interrupt pending bits. */
774 #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
775 #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2
776 #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
777 #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
778 #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
779 #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
780 #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2
781
782 /* Use a clearer name for this. */
783 #define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
784
785 /* Instead of computing the condition codes after each x86 instruction,
786 * QEMU just stores one operand (called CC_SRC), the result
787 * (called CC_DST) and the type of operation (called CC_OP). When the
788 * condition codes are needed, they can be calculated
789 * using this information. Condition codes are not generated if they
790 * are only needed for conditional branches.
791 */
792 typedef enum {
793 CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
794 CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
795
796 CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
797 CC_OP_MULW,
798 CC_OP_MULL,
799 CC_OP_MULQ,
800
801 CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
802 CC_OP_ADDW,
803 CC_OP_ADDL,
804 CC_OP_ADDQ,
805
806 CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
807 CC_OP_ADCW,
808 CC_OP_ADCL,
809 CC_OP_ADCQ,
810
811 CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
812 CC_OP_SUBW,
813 CC_OP_SUBL,
814 CC_OP_SUBQ,
815
816 CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
817 CC_OP_SBBW,
818 CC_OP_SBBL,
819 CC_OP_SBBQ,
820
821 CC_OP_LOGICB, /* modify all flags, CC_DST = res */
822 CC_OP_LOGICW,
823 CC_OP_LOGICL,
824 CC_OP_LOGICQ,
825
826 CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
827 CC_OP_INCW,
828 CC_OP_INCL,
829 CC_OP_INCQ,
830
831 CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
832 CC_OP_DECW,
833 CC_OP_DECL,
834 CC_OP_DECQ,
835
836 CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
837 CC_OP_SHLW,
838 CC_OP_SHLL,
839 CC_OP_SHLQ,
840
841 CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
842 CC_OP_SARW,
843 CC_OP_SARL,
844 CC_OP_SARQ,
845
846 CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
847 CC_OP_BMILGW,
848 CC_OP_BMILGL,
849 CC_OP_BMILGQ,
850
851 CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
852 CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */
853 CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
854
855 CC_OP_CLR, /* Z set, all other flags clear. */
856 CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */
857
858 CC_OP_NB,
859 } CCOp;
860
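/*
 * Illustrative sketch of the lazy-flags scheme (mirroring what the
 * cc_helper code derives): for CC_OP_ADDL, CC_DST holds the result and
 * CC_SRC the first operand, so the carry flag can be recomputed on
 * demand as an unsigned overflow test:
 *
 *     uint32_t res = CC_DST, src1 = CC_SRC;
 *     bool cf = res < src1;
 */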
861 typedef struct SegmentCache {
862 uint32_t selector;
863 target_ulong base;
864 uint32_t limit;
865 uint32_t flags;
866 } SegmentCache;
867
868 #define MMREG_UNION(n, bits) \
869 union n { \
870 uint8_t _b_##n[(bits)/8]; \
871 uint16_t _w_##n[(bits)/16]; \
872 uint32_t _l_##n[(bits)/32]; \
873 uint64_t _q_##n[(bits)/64]; \
874 float32 _s_##n[(bits)/32]; \
875 float64 _d_##n[(bits)/64]; \
876 }
877
878 typedef union {
879 uint8_t _b[16];
880 uint16_t _w[8];
881 uint32_t _l[4];
882 uint64_t _q[2];
883 } XMMReg;
884
885 typedef union {
886 uint8_t _b[32];
887 uint16_t _w[16];
888 uint32_t _l[8];
889 uint64_t _q[4];
890 } YMMReg;
891
892 typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
893 typedef MMREG_UNION(MMXReg, 64) MMXReg;
894
895 typedef struct BNDReg {
896 uint64_t lb;
897 uint64_t ub;
898 } BNDReg;
899
900 typedef struct BNDCSReg {
901 uint64_t cfgu;
902 uint64_t sts;
903 } BNDCSReg;
904
905 #define BNDCFG_ENABLE 1ULL
906 #define BNDCFG_BNDPRESERVE 2ULL
907 #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
908
909 #ifdef HOST_WORDS_BIGENDIAN
910 #define ZMM_B(n) _b_ZMMReg[63 - (n)]
911 #define ZMM_W(n) _w_ZMMReg[31 - (n)]
912 #define ZMM_L(n) _l_ZMMReg[15 - (n)]
913 #define ZMM_S(n) _s_ZMMReg[15 - (n)]
914 #define ZMM_Q(n) _q_ZMMReg[7 - (n)]
915 #define ZMM_D(n) _d_ZMMReg[7 - (n)]
916
917 #define MMX_B(n) _b_MMXReg[7 - (n)]
918 #define MMX_W(n) _w_MMXReg[3 - (n)]
919 #define MMX_L(n) _l_MMXReg[1 - (n)]
920 #define MMX_S(n) _s_MMXReg[1 - (n)]
921 #else
922 #define ZMM_B(n) _b_ZMMReg[n]
923 #define ZMM_W(n) _w_ZMMReg[n]
924 #define ZMM_L(n) _l_ZMMReg[n]
925 #define ZMM_S(n) _s_ZMMReg[n]
926 #define ZMM_Q(n) _q_ZMMReg[n]
927 #define ZMM_D(n) _d_ZMMReg[n]
928
929 #define MMX_B(n) _b_MMXReg[n]
930 #define MMX_W(n) _w_MMXReg[n]
931 #define MMX_L(n) _l_MMXReg[n]
932 #define MMX_S(n) _s_MMXReg[n]
933 #endif
934 #define MMX_Q(n) _q_MMXReg[n]
935
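/*
 * Illustrative sketch: the ZMM_*()/MMX_*() accessors hide host
 * endianness, so guest-visible element order is the same either way;
 * e.g. the low dword of xmm0:
 *
 *     uint32_t lo = env->xmm_regs[0].ZMM_L(0);
 */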
936 typedef union {
937 floatx80 d __attribute__((aligned(16)));
938 MMXReg mmx;
939 } FPReg;
940
941 typedef struct {
942 uint64_t base;
943 uint64_t mask;
944 } MTRRVar;
945
946 #define CPU_NB_REGS64 16
947 #define CPU_NB_REGS32 8
948
949 #ifdef TARGET_X86_64
950 #define CPU_NB_REGS CPU_NB_REGS64
951 #else
952 #define CPU_NB_REGS CPU_NB_REGS32
953 #endif
954
955 #define MAX_FIXED_COUNTERS 3
956 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
957
958 #define NB_MMU_MODES 3
959 #define TARGET_INSN_START_EXTRA_WORDS 1
960
961 #define NB_OPMASK_REGS 8
962
963 /* A CPU can't have an APIC ID of 0xFFFFFFFF, so that value is used to
964 * indicate that the APIC ID hasn't been set yet
965 */
966 #define UNASSIGNED_APIC_ID 0xFFFFFFFF
967
968 typedef union X86LegacyXSaveArea {
969 struct {
970 uint16_t fcw;
971 uint16_t fsw;
972 uint8_t ftw;
973 uint8_t reserved;
974 uint16_t fpop;
975 uint64_t fpip;
976 uint64_t fpdp;
977 uint32_t mxcsr;
978 uint32_t mxcsr_mask;
979 FPReg fpregs[8];
980 uint8_t xmm_regs[16][16];
981 };
982 uint8_t data[512];
983 } X86LegacyXSaveArea;
984
985 typedef struct X86XSaveHeader {
986 uint64_t xstate_bv;
987 uint64_t xcomp_bv;
988 uint64_t reserve0;
989 uint8_t reserved[40];
990 } X86XSaveHeader;
991
992 /* Ext. save area 2: AVX State */
993 typedef struct XSaveAVX {
994 uint8_t ymmh[16][16];
995 } XSaveAVX;
996
997 /* Ext. save area 3: BNDREG */
998 typedef struct XSaveBNDREG {
999 BNDReg bnd_regs[4];
1000 } XSaveBNDREG;
1001
1002 /* Ext. save area 4: BNDCSR */
1003 typedef union XSaveBNDCSR {
1004 BNDCSReg bndcsr;
1005 uint8_t data[64];
1006 } XSaveBNDCSR;
1007
1008 /* Ext. save area 5: Opmask */
1009 typedef struct XSaveOpmask {
1010 uint64_t opmask_regs[NB_OPMASK_REGS];
1011 } XSaveOpmask;
1012
1013 /* Ext. save area 6: ZMM_Hi256 */
1014 typedef struct XSaveZMM_Hi256 {
1015 uint8_t zmm_hi256[16][32];
1016 } XSaveZMM_Hi256;
1017
1018 /* Ext. save area 7: Hi16_ZMM */
1019 typedef struct XSaveHi16_ZMM {
1020 uint8_t hi16_zmm[16][64];
1021 } XSaveHi16_ZMM;
1022
1023 /* Ext. save area 9: PKRU state */
1024 typedef struct XSavePKRU {
1025 uint32_t pkru;
1026 uint32_t padding;
1027 } XSavePKRU;
1028
1029 typedef struct X86XSaveArea {
1030 X86LegacyXSaveArea legacy;
1031 X86XSaveHeader header;
1032
1033 /* Extended save areas: */
1034
1035 /* AVX State: */
1036 XSaveAVX avx_state;
1037 uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
1038 /* MPX State: */
1039 XSaveBNDREG bndreg_state;
1040 XSaveBNDCSR bndcsr_state;
1041 /* AVX-512 State: */
1042 XSaveOpmask opmask_state;
1043 XSaveZMM_Hi256 zmm_hi256_state;
1044 XSaveHi16_ZMM hi16_zmm_state;
1045 /* PKRU State: */
1046 XSavePKRU pkru_state;
1047 } X86XSaveArea;
1048
1049 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
1050 QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
1051 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
1052 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
1053 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
1054 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
1055 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
1056 QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
1057 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
1058 QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
1059 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
1060 QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
1061 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
1062 QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
1063
1064 typedef enum TPRAccess {
1065 TPR_ACCESS_READ,
1066 TPR_ACCESS_WRITE,
1067 } TPRAccess;
1068
1069 /* Cache information data structures: */
1070
1071 enum CacheType {
1072 DATA_CACHE,
1073 INSTRUCTION_CACHE,
1074 UNIFIED_CACHE
1075 };
1076
1077 typedef struct CPUCacheInfo {
1078 enum CacheType type;
1079 uint8_t level;
1080 /* Size in bytes */
1081 uint32_t size;
1082 /* Line size, in bytes */
1083 uint16_t line_size;
1084 /*
1085 * Associativity.
1086 * Note: representation of fully-associative caches is not implemented
1087 */
1088 uint8_t associativity;
1089 /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */
1090 uint8_t partitions;
1091 /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */
1092 uint32_t sets;
1093 /*
1094 * Lines per tag.
1095 * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
1096 * (Is this a synonym for @partitions?)
1097 */
1098 uint8_t lines_per_tag;
1099
1100 /* Self-initializing cache */
1101 bool self_init;
1102 /*
1103 * WBINVD/INVD is not guaranteed to act upon lower level caches of
1104 * non-originating threads sharing this cache.
1105 * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0]
1106 */
1107 bool no_invd_sharing;
1108 /*
1109 * Cache is inclusive of lower cache levels.
1110 * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1].
1111 */
1112 bool inclusive;
1113 /*
1114 * A complex function is used to index the cache, potentially using all
1115 * address bits. CPUID[4].EDX[bit 2].
1116 */
1117 bool complex_indexing;
1118 } CPUCacheInfo;
1119
1120
1121 typedef struct CPUCaches {
1122 CPUCacheInfo *l1d_cache;
1123 CPUCacheInfo *l1i_cache;
1124 CPUCacheInfo *l2_cache;
1125 CPUCacheInfo *l3_cache;
1126 } CPUCaches;
1127
1128 typedef struct CPUX86State {
1129 /* standard registers */
1130 target_ulong regs[CPU_NB_REGS];
1131 target_ulong eip;
1132 target_ulong eflags; /* eflags register. During CPU emulation, CC
1133 flags and DF are set to zero because they are
1134 stored elsewhere */
1135
1136 /* emulator internal eflags handling */
1137 target_ulong cc_dst;
1138 target_ulong cc_src;
1139 target_ulong cc_src2;
1140 uint32_t cc_op;
1141 int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
1142 uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
1143 are known at translation time. */
1144 uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
1145
1146 /* segments */
1147 SegmentCache segs[6]; /* selector values */
1148 SegmentCache ldt;
1149 SegmentCache tr;
1150 SegmentCache gdt; /* only base and limit are used */
1151 SegmentCache idt; /* only base and limit are used */
1152
1153 target_ulong cr[5]; /* NOTE: cr1 is unused */
1154 int32_t a20_mask;
1155
1156 BNDReg bnd_regs[4];
1157 BNDCSReg bndcs_regs;
1158 uint64_t msr_bndcfgs;
1159 uint64_t efer;
1160
1161 /* Beginning of state preserved by INIT (dummy marker). */
1162 struct {} start_init_save;
1163
1164 /* FPU state */
1165 unsigned int fpstt; /* top of stack index */
1166 uint16_t fpus;
1167 uint16_t fpuc;
1168 uint8_t fptags[8]; /* 0 = valid, 1 = empty */
1169 FPReg fpregs[8];
1170 /* KVM-only so far */
1171 uint16_t fpop;
1172 uint64_t fpip;
1173 uint64_t fpdp;
1174
1175 /* emulator internal variables */
1176 float_status fp_status;
1177 floatx80 ft0;
1178
1179 float_status mmx_status; /* for 3DNow! float ops */
1180 float_status sse_status;
1181 uint32_t mxcsr;
1182 ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
1183 ZMMReg xmm_t0;
1184 MMXReg mmx_t0;
1185
1186 XMMReg ymmh_regs[CPU_NB_REGS];
1187
1188 uint64_t opmask_regs[NB_OPMASK_REGS];
1189 YMMReg zmmh_regs[CPU_NB_REGS];
1190 ZMMReg hi16_zmm_regs[CPU_NB_REGS];
1191
1192 /* sysenter registers */
1193 uint32_t sysenter_cs;
1194 target_ulong sysenter_esp;
1195 target_ulong sysenter_eip;
1196 uint64_t star;
1197
1198 uint64_t vm_hsave;
1199
1200 #ifdef TARGET_X86_64
1201 target_ulong lstar;
1202 target_ulong cstar;
1203 target_ulong fmask;
1204 target_ulong kernelgsbase;
1205 #endif
1206
1207 uint64_t tsc;
1208 uint64_t tsc_adjust;
1209 uint64_t tsc_deadline;
1210 uint64_t tsc_aux;
1211
1212 uint64_t xcr0;
1213
1214 uint64_t mcg_status;
1215 uint64_t msr_ia32_misc_enable;
1216 uint64_t msr_ia32_feature_control;
1217
1218 uint64_t msr_fixed_ctr_ctrl;
1219 uint64_t msr_global_ctrl;
1220 uint64_t msr_global_status;
1221 uint64_t msr_global_ovf_ctrl;
1222 uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
1223 uint64_t msr_gp_counters[MAX_GP_COUNTERS];
1224 uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
1225
1226 uint64_t pat;
1227 uint32_t smbase;
1228 uint64_t msr_smi_count;
1229
1230 uint32_t pkru;
1231
1232 uint64_t spec_ctrl;
1233 uint64_t virt_ssbd;
1234
1235 /* End of state preserved by INIT (dummy marker). */
1236 struct {} end_init_save;
1237
1238 uint64_t system_time_msr;
1239 uint64_t wall_clock_msr;
1240 uint64_t steal_time_msr;
1241 uint64_t async_pf_en_msr;
1242 uint64_t pv_eoi_en_msr;
1243
1244 /* Partition-wide HV MSRs, will be updated only on the first vcpu */
1245 uint64_t msr_hv_hypercall;
1246 uint64_t msr_hv_guest_os_id;
1247 uint64_t msr_hv_tsc;
1248
1249 /* Per-VCPU HV MSRs */
1250 uint64_t msr_hv_vapic;
1251 uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
1252 uint64_t msr_hv_runtime;
1253 uint64_t msr_hv_synic_control;
1254 uint64_t msr_hv_synic_evt_page;
1255 uint64_t msr_hv_synic_msg_page;
1256 uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
1257 uint64_t msr_hv_stimer_config[HV_STIMER_COUNT];
1258 uint64_t msr_hv_stimer_count[HV_STIMER_COUNT];
1259 uint64_t msr_hv_reenlightenment_control;
1260 uint64_t msr_hv_tsc_emulation_control;
1261 uint64_t msr_hv_tsc_emulation_status;
1262
1263 uint64_t msr_rtit_ctrl;
1264 uint64_t msr_rtit_status;
1265 uint64_t msr_rtit_output_base;
1266 uint64_t msr_rtit_output_mask;
1267 uint64_t msr_rtit_cr3_match;
1268 uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];
1269
1270 /* exception/interrupt handling */
1271 int error_code;
1272 int exception_is_int;
1273 target_ulong exception_next_eip;
1274 target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
1275 union {
1276 struct CPUBreakpoint *cpu_breakpoint[4];
1277 struct CPUWatchpoint *cpu_watchpoint[4];
1278 }; /* break/watchpoints for dr[0..3] */
1279 int old_exception; /* exception in flight */
1280
1281 uint64_t vm_vmcb;
1282 uint64_t tsc_offset;
1283 uint64_t intercept;
1284 uint16_t intercept_cr_read;
1285 uint16_t intercept_cr_write;
1286 uint16_t intercept_dr_read;
1287 uint16_t intercept_dr_write;
1288 uint32_t intercept_exceptions;
1289 uint64_t nested_cr3;
1290 uint32_t nested_pg_mode;
1291 uint8_t v_tpr;
1292
1293 /* KVM states, automatically cleared on reset */
1294 uint8_t nmi_injected;
1295 uint8_t nmi_pending;
1296
1297 uintptr_t retaddr;
1298
1299 /* Fields up to this point are cleared by a CPU reset */
1300 struct {} end_reset_fields;
1301
1302 CPU_COMMON
1303
1304 /* Fields after CPU_COMMON are preserved across CPU reset. */
1305
1306 /* processor features (e.g. for CPUID insn) */
1307 /* Minimum level/xlevel/xlevel2, based on CPU model + features */
1308 uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
1309 /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
1310 uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
1311 /* Actual level/xlevel/xlevel2 value: */
1312 uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
1313 uint32_t cpuid_vendor1;
1314 uint32_t cpuid_vendor2;
1315 uint32_t cpuid_vendor3;
1316 uint32_t cpuid_version;
1317 FeatureWordArray features;
1318 /* Features that were explicitly enabled/disabled */
1319 FeatureWordArray user_features;
1320 uint32_t cpuid_model[12];
1321 /* Cache information for CPUID. When legacy-cache=on, the cache data
1322 * on each CPUID leaf will be different, because we keep compatibility
1323 * with old QEMU versions.
1324 */
1325 CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;
1326
1327 /* MTRRs */
1328 uint64_t mtrr_fixed[11];
1329 uint64_t mtrr_deftype;
1330 MTRRVar mtrr_var[MSR_MTRRcap_VCNT];
1331
1332 /* For KVM */
1333 uint32_t mp_state;
1334 int32_t exception_injected;
1335 int32_t interrupt_injected;
1336 uint8_t soft_interrupt;
1337 uint8_t has_error_code;
1338 uint32_t ins_len;
1339 uint32_t sipi_vector;
1340 bool tsc_valid;
1341 int64_t tsc_khz;
1342 int64_t user_tsc_khz; /* for sanity check only */
1343 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
1344 void *xsave_buf;
1345 #endif
1346 #if defined(CONFIG_HVF)
1347 HVFX86EmulatorState *hvf_emul;
1348 #endif
1349
1350 uint64_t mcg_cap;
1351 uint64_t mcg_ctl;
1352 uint64_t mcg_ext_ctl;
1353 uint64_t mce_banks[MCE_BANKS_DEF*4];
1354 uint64_t xstate_bv;
1355
1356 /* vmstate */
1357 uint16_t fpus_vmstate;
1358 uint16_t fptag_vmstate;
1359 uint16_t fpregs_format_vmstate;
1360
1361 uint64_t xss;
1362
1363 TPRAccess tpr_access_type;
1364 } CPUX86State;
1365
1366 struct kvm_msrs;
1367
1368 /**
1369 * X86CPU:
1370 * @env: #CPUX86State
1371 * @migratable: If set, only migratable flags will be accepted when "enforce"
1372 * mode is used, and only migratable flags will be included in the "host"
1373 * CPU model.
1374 *
1375 * An x86 CPU.
1376 */
1377 struct X86CPU {
1378 /*< private >*/
1379 CPUState parent_obj;
1380 /*< public >*/
1381
1382 CPUX86State env;
1383
1384 bool hyperv_vapic;
1385 bool hyperv_relaxed_timing;
1386 int hyperv_spinlock_attempts;
1387 char *hyperv_vendor_id;
1388 bool hyperv_time;
1389 bool hyperv_crash;
1390 bool hyperv_reset;
1391 bool hyperv_vpindex;
1392 bool hyperv_runtime;
1393 bool hyperv_synic;
1394 bool hyperv_synic_kvm_only;
1395 bool hyperv_stimer;
1396 bool hyperv_frequencies;
1397 bool hyperv_reenlightenment;
1398 bool hyperv_tlbflush;
1399 bool hyperv_evmcs;
1400 bool hyperv_ipi;
1401 bool check_cpuid;
1402 bool enforce_cpuid;
1403 bool expose_kvm;
1404 bool expose_tcg;
1405 bool migratable;
1406 bool migrate_smi_count;
1407 bool max_features; /* Enable all supported features automatically */
1408 uint32_t apic_id;
1409
1410 /* Enables publishing of TSC increment and Local APIC bus frequencies to
1411 * the guest OS in CPUID page 0x40000010, the same way that VMware does. */
1412 bool vmware_cpuid_freq;
1413
1414 /* if true the CPUID code directly forwards host cache leaves to the guest */
1415 bool cache_info_passthrough;
1416
1417 /* if true the CPUID code directly forwards
1418 * host monitor/mwait leaves to the guest */
1419 struct {
1420 uint32_t eax;
1421 uint32_t ebx;
1422 uint32_t ecx;
1423 uint32_t edx;
1424 } mwait;
1425
1426 /* Features that were filtered out because of missing host capabilities */
1427 uint32_t filtered_features[FEATURE_WORDS];
1428
1429 /* Enable PMU CPUID bits. This can't be enabled by default yet because
1430 * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
1431 * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
1432 * capabilities) directly to the guest.
1433 */
1434 bool enable_pmu;
1435
1436 /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
1437 * disabled by default to avoid breaking migration between QEMU with
1438 * different LMCE configurations.
1439 */
1440 bool enable_lmce;
1441
1442 /* Compatibility bits for old machine types.
1443 * If true, present a virtual L3 cache to the VM; the vCPUs in the same
1444 * virtual socket share a virtual L3 cache.
1445 */
1446 bool enable_l3_cache;
1447
1448 /* Compatibility bits for old machine types.
1449 * If true, present the old cache topology information
1450 */
1451 bool legacy_cache;
1452
1453 /* Compatibility bits for old machine types: */
1454 bool enable_cpuid_0xb;
1455
1456 /* Enable auto level-increase for all CPUID leaves */
1457 bool full_cpuid_auto_level;
1458
1459 /* Enable auto level-increase for the Intel Processor Trace leaf */
1460 bool intel_pt_auto_level;
1461
1462 /* if true fill the top bits of the MTRR_PHYSMASKn variable range */
1463 bool fill_mtrr_mask;
1464
1465 /* if true override the phys_bits value with a value read from the host */
1466 bool host_phys_bits;
1467
1468 /* if set, limit maximum value for phys_bits when host_phys_bits is true */
1469 uint8_t host_phys_bits_limit;
1470
1471 /* Stop SMI delivery for migration compatibility with old machines */
1472 bool kvm_no_smi_migration;
1473
1474 /* Number of physical address bits supported */
1475 uint32_t phys_bits;
1476
1477 /* in order to simplify APIC support, we leave this pointer to the
1478 user */
1479 struct DeviceState *apic_state;
1480 struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
1481 Notifier machine_done;
1482
1483 struct kvm_msrs *kvm_msr_buf;
1484
1485 int32_t node_id; /* NUMA node this CPU belongs to */
1486 int32_t socket_id;
1487 int32_t core_id;
1488 int32_t thread_id;
1489
1490 int32_t hv_max_vps;
1491 };
1492
1493 static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
1494 {
1495 return container_of(env, X86CPU, env);
1496 }
1497
1498 #define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e))
1499
1500 #define ENV_OFFSET offsetof(X86CPU, env)
1501
1502 #ifndef CONFIG_USER_ONLY
1503 extern struct VMStateDescription vmstate_x86_cpu;
1504 #endif
1505
1506 /**
1507 * x86_cpu_do_interrupt:
1508 * @cpu: vCPU the interrupt is to be handled by.
1509 */
1510 void x86_cpu_do_interrupt(CPUState *cpu);
1511 bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
1512 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
1513
1514 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
1515 int cpuid, void *opaque);
1516 int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
1517 int cpuid, void *opaque);
1518 int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
1519 void *opaque);
1520 int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
1521 void *opaque);
1522
1523 void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
1524 Error **errp);
1525
1526 void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
1527
1528 hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
1529
1530 int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
1531 int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
1532
1533 void x86_cpu_exec_enter(CPUState *cpu);
1534 void x86_cpu_exec_exit(CPUState *cpu);
1535
1536 void x86_cpu_list(void);
1537 int cpu_x86_support_mca_broadcast(CPUX86State *env);
1538
1539 int cpu_get_pic_interrupt(CPUX86State *s);
1540 /* MSDOS compatibility mode FPU exception support */
1541 void cpu_set_ferr(CPUX86State *s);
1542 /* mpx_helper.c */
1543 void cpu_sync_bndcs_hflags(CPUX86State *env);
1544
1545 /* this function must always be used to load data into the segment
1546 cache: it synchronizes the hflags with the segment cache values */
1547 static inline void cpu_x86_load_seg_cache(CPUX86State *env,
1548 int seg_reg, unsigned int selector,
1549 target_ulong base,
1550 unsigned int limit,
1551 unsigned int flags)
1552 {
1553 SegmentCache *sc;
1554 unsigned int new_hflags;
1555
1556 sc = &env->segs[seg_reg];
1557 sc->selector = selector;
1558 sc->base = base;
1559 sc->limit = limit;
1560 sc->flags = flags;
1561
1562 /* update the hidden flags */
1563 {
1564 if (seg_reg == R_CS) {
1565 #ifdef TARGET_X86_64
1566 if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
1567 /* long mode */
1568 env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
1569 env->hflags &= ~(HF_ADDSEG_MASK);
1570 } else
1571 #endif
1572 {
1573 /* legacy / compatibility case */
1574 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
1575 >> (DESC_B_SHIFT - HF_CS32_SHIFT);
1576 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
1577 new_hflags;
1578 }
1579 }
1580 if (seg_reg == R_SS) {
1581 int cpl = (flags >> DESC_DPL_SHIFT) & 3;
1582 #if HF_CPL_MASK != 3
1583 #error HF_CPL_MASK is hardcoded
1584 #endif
1585 env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
1586 /* Possibly switch between BNDCFGS and BNDCFGU */
1587 cpu_sync_bndcs_hflags(env);
1588 }
1589 new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
1590 >> (DESC_B_SHIFT - HF_SS32_SHIFT);
1591 if (env->hflags & HF_CS64_MASK) {
1592 /* zero base assumed for DS, ES and SS in long mode */
1593 } else if (!(env->cr[0] & CR0_PE_MASK) ||
1594 (env->eflags & VM_MASK) ||
1595 !(env->hflags & HF_CS32_MASK)) {
1596 /* XXX: try to avoid this test. The problem comes from the
1597 fact that in real mode or vm86 mode we only modify the
1598 'base' and 'selector' fields of the segment cache to go
1599 faster. A solution may be to force addseg to one in
1600 translate-i386.c. */
1601 new_hflags |= HF_ADDSEG_MASK;
1602 } else {
1603 new_hflags |= ((env->segs[R_DS].base |
1604 env->segs[R_ES].base |
1605 env->segs[R_SS].base) != 0) <<
1606 HF_ADDSEG_SHIFT;
1607 }
1608 env->hflags = (env->hflags &
1609 ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
1610 }
1611 }
1612
1613 static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
1614 uint8_t sipi_vector)
1615 {
1616 CPUState *cs = CPU(cpu);
1617 CPUX86State *env = &cpu->env;
1618
1619 env->eip = 0;
1620 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
1621 sipi_vector << 12,
1622 env->segs[R_CS].limit,
1623 env->segs[R_CS].flags);
1624 cs->halted = 0;
1625 }
1626
1627 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1628 target_ulong *base, unsigned int *limit,
1629 unsigned int *flags);
1630
1631 /* op_helper.c */
1632 /* used for debug or cpu save/restore */
1633
1634 /* cpu-exec.c */
1635 /* the following helpers are only usable in user mode simulation as
1636 they can trigger unexpected exceptions */
1637 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
1638 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
1639 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
1640 void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr);
1641 void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr);
1642
1643 /* you can call this signal handler from your SIGBUS and SIGSEGV
1644 signal handlers to inform the virtual CPU of exceptions. Non-zero
1645 is returned if the signal was handled by the virtual CPU. */
1646 int cpu_x86_signal_handler(int host_signum, void *pinfo,
1647 void *puc);
1648
1649 /* cpu.c */
1650 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1651 uint32_t *eax, uint32_t *ebx,
1652 uint32_t *ecx, uint32_t *edx);
1653 void cpu_clear_apic_feature(CPUX86State *env);
1654 void host_cpuid(uint32_t function, uint32_t count,
1655 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
1656 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
1657
1658 /* helper.c */
1659 int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, int size,
1660 int is_write, int mmu_idx);
1661 void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
1662
1663 #ifndef CONFIG_USER_ONLY
1664 static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
1665 {
1666 return !!attrs.secure;
1667 }
1668
1669 static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
1670 {
1671 return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
1672 }
1673
1674 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
1675 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
1676 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
1677 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
1678 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
1679 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
1680 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
1681 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
1682 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
1683 #endif
1684
1685 void breakpoint_handler(CPUState *cs);
1686
1687 /* will be suppressed */
1688 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
1689 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
1690 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
1691 void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
1692
1693 /* hw/pc.c */
1694 uint64_t cpu_get_tsc(CPUX86State *env);
1695
1696 #define TARGET_PAGE_BITS 12
1697
1698 #ifdef TARGET_X86_64
1699 #define TARGET_PHYS_ADDR_SPACE_BITS 52
1700 /* ??? This is really 48 bits, sign-extended, but the only thing
1701 accessible to userland with bit 48 set is the VSYSCALL, and that
1702 is handled via other mechanisms. */
1703 #define TARGET_VIRT_ADDR_SPACE_BITS 47
1704 #else
1705 #define TARGET_PHYS_ADDR_SPACE_BITS 36
1706 #define TARGET_VIRT_ADDR_SPACE_BITS 32
1707 #endif
1708
1709 /* XXX: This value should match the one returned by CPUID
1710 * and the one used in exec.c */
1711 # if defined(TARGET_X86_64)
1712 # define TCG_PHYS_ADDR_BITS 40
1713 # else
1714 # define TCG_PHYS_ADDR_BITS 36
1715 # endif
1716
1717 #define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS)
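/* For example, with TCG_PHYS_ADDR_BITS == 40 this is
 * MAKE_64BIT_MASK(0, 40) == 0xffffffffffULL, i.e. bits 0..39 set. */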
1718
1719 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
1720 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
1721 #define CPU_RESOLVING_TYPE TYPE_X86_CPU
1722
1723 #ifdef TARGET_X86_64
1724 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64")
1725 #else
1726 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
1727 #endif
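/* Illustration of the expansion (assuming TYPE_X86_CPU, defined in
 * cpu-qom.h, is "x86_64-cpu" on 64-bit targets):
 *     X86_CPU_TYPE_NAME("qemu64")
 *     -> "qemu64" "-" TYPE_X86_CPU
 *     -> "qemu64-x86_64-cpu"
 */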
1728
1729 #define cpu_signal_handler cpu_x86_signal_handler
1730 #define cpu_list x86_cpu_list
1731
1732 /* MMU modes definitions */
1733 #define MMU_MODE0_SUFFIX _ksmap
1734 #define MMU_MODE1_SUFFIX _user
1735 #define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
1736 #define MMU_KSMAP_IDX 0
1737 #define MMU_USER_IDX 1
1738 #define MMU_KNOSMAP_IDX 2
1739 static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
1740 {
1741 return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
1742 (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
1743 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
1744 }
1745
1746 static inline int cpu_mmu_index_kernel(CPUX86State *env)
1747 {
1748 return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
1749 ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
1750 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
1751 }
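/* A worked summary of the two index functions above (restating the code,
 * not adding new policy):
 *
 *   cpu_mmu_index:
 *     CPL == 3                           -> MMU_USER_IDX    (1)
 *     CPL < 3, SMAP off or EFLAGS.AC=1   -> MMU_KNOSMAP_IDX (2)
 *     CPL < 3, SMAP on and EFLAGS.AC=0   -> MMU_KSMAP_IDX   (0)
 *
 *   cpu_mmu_index_kernel (index for explicit kernel-mode accesses):
 *     SMAP off                           -> MMU_KNOSMAP_IDX
 *     SMAP on, CPL < 3 and EFLAGS.AC=1   -> MMU_KNOSMAP_IDX
 *     otherwise                          -> MMU_KSMAP_IDX
 */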
1752
1753 #define CC_DST (env->cc_dst)
1754 #define CC_SRC (env->cc_src)
1755 #define CC_SRC2 (env->cc_src2)
1756 #define CC_OP (env->cc_op)
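/* These shorthands back QEMU's lazy condition-code scheme: TCG does not
 * materialize EFLAGS after every instruction. Instead CC_OP records which
 * operation last set the flags, CC_DST/CC_SRC/CC_SRC2 hold its result and
 * inputs, and cpu_cc_compute_all() derives the real flag bits on demand.
 * E.g. after a 32-bit add, CC_OP is CC_OP_ADDL and ZF follows from
 * CC_DST == 0 (a sketch of the idea, not the exact helper code). */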
1757
1758 /* n must be a compile-time constant for this to be efficient */
1759 static inline target_long lshift(target_long x, int n)
1760 {
1761 if (n >= 0) {
1762 return x << n;
1763 } else {
1764 return x >> (-n);
1765 }
1766 }
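/* Usage sketch: lshift(x, 3) == x << 3, while lshift(x, -3) == x >> 3
 * (an arithmetic shift, since target_long is signed). With a constant n
 * the compiler folds the branch away, which is why the comment above
 * asks for a constant. */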
1767
1768 /* float macros */
1769 #define FT0 (env->ft0)
1770 #define ST0 (env->fpregs[env->fpstt].d)
1771 #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
1772 #define ST1 ST(1)
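/* env->fpstt is the x87 top-of-stack index, so ST(n) wraps modulo 8:
 * e.g. with fpstt == 6, ST(3) is fpregs[(6 + 3) & 7] == fpregs[1]. */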
1773
1774 /* translate.c */
1775 void tcg_x86_init(void);
1776
1777 #include "exec/cpu-all.h"
1778 #include "svm.h"
1779
1780 #if !defined(CONFIG_USER_ONLY)
1781 #include "hw/i386/apic.h"
1782 #endif
1783
1784 static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
1785 target_ulong *cs_base, uint32_t *flags)
1786 {
1787 *cs_base = env->segs[R_CS].base;
1788 *pc = *cs_base + env->eip;
1789 *flags = env->hflags |
1790 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
1791 }
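/* The returned pc is a linear address: e.g. in real mode with CS
 * selector 0x1000 (base 0x10000) and eip 0x0400, *pc == 0x10400. The
 * hflags and EFLAGS bits folded into *flags key the TB cache, so code
 * translated under one execution mode is never reused in another. */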
1792
1793 void do_cpu_init(X86CPU *cpu);
1794 void do_cpu_sipi(X86CPU *cpu);
1795
1796 #define MCE_INJECT_BROADCAST 1
1797 #define MCE_INJECT_UNCOND_AO 2
1798
1799 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
1800 uint64_t status, uint64_t mcg_status, uint64_t addr,
1801 uint64_t misc, int flags);
1802
1803 /* excp_helper.c */
1804 void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
1805 void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
1806 uintptr_t retaddr);
1807 void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
1808 int error_code);
1809 void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
1810 int error_code, uintptr_t retaddr);
1811 void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
1812 int error_code, int next_eip_addend);
1813
1814 /* cc_helper.c */
1815 extern const uint8_t parity_table[256];
1816 uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
1817
1818 static inline uint32_t cpu_compute_eflags(CPUX86State *env)
1819 {
1820 uint32_t eflags = env->eflags;
1821 if (tcg_enabled()) {
1822 eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
1823 }
1824 return eflags;
1825 }
1826
1827 /* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
1828 * after generating a call to a helper that uses this.
1829 */
1830 static inline void cpu_load_eflags(CPUX86State *env, int eflags,
1831 int update_mask)
1832 {
1833 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1834 CC_OP = CC_OP_EFLAGS;
1835 env->df = 1 - (2 * ((eflags >> 10) & 1));
1836 env->eflags = (env->eflags & ~update_mask) |
1837 (eflags & update_mask) | 0x2;
1838 }
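/* Note on the arithmetic above: bit 10 of EFLAGS is DF, so
 * env->df = 1 - 2*DF yields +1 (string ops increment) or -1 (decrement).
 * The final "| 0x2" keeps EFLAGS bit 1, which the architecture defines
 * as always set. */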
1839
1840 /* Load EFER and update the corresponding hflags. XXX: do consistency
1841 checks with cpuid bits? */
1842 static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
1843 {
1844 env->efer = val;
1845 env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
1846 if (env->efer & MSR_EFER_LMA) {
1847 env->hflags |= HF_LMA_MASK;
1848 }
1849 if (env->efer & MSR_EFER_SVME) {
1850 env->hflags |= HF_SVME_MASK;
1851 }
1852 }
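/* E.g. writing an EFER value with LMA set turns on HF_LMA_MASK here, so
 * the translator and MMU code can test for long mode via hflags instead
 * of re-reading the MSR. */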
1853
1854 static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
1855 {
1856 return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
1857 }
1858
1859 static inline int32_t x86_get_a20_mask(CPUX86State *env)
1860 {
1861 if (env->hflags & HF_SMM_MASK) {
1862 return -1;
1863 } else {
1864 return env->a20_mask;
1865 }
1866 }
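/* In SMM every address bit is valid, hence the -1 (all-ones) mask.
 * Outside SMM, env->a20_mask clears bit 20 while the A20 gate is
 * disabled (see x86_cpu_set_a20), so e.g. 0x100000 & mask == 0 and the
 * access wraps to low memory, matching 8086 behaviour. */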
1867
1868 /* fpu_helper.c */
1869 void update_fp_status(CPUX86State *env);
1870 void update_mxcsr_status(CPUX86State *env);
1871
1872 static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
1873 {
1874 env->mxcsr = mxcsr;
1875 if (tcg_enabled()) {
1876 update_mxcsr_status(env);
1877 }
1878 }
1879
1880 static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
1881 {
1882 env->fpuc = fpuc;
1883 if (tcg_enabled()) {
1884 update_fp_status(env);
1885 }
1886 }
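/* The tcg_enabled() guards matter because only TCG keeps derived
 * softfloat state that must be refreshed: e.g. MXCSR bits 13..14 (RC)
 * select the SSE rounding mode, and update_mxcsr_status() is what
 * propagates them. Under KVM the raw env->mxcsr / env->fpuc values are
 * simply synced to the kernel instead. */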
1887
1888 /* mem_helper.c */
1889 void helper_lock_init(void);
1890
1891 /* svm_helper.c */
1892 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
1893 uint64_t param, uintptr_t retaddr);
1894 void QEMU_NORETURN cpu_vmexit(CPUX86State *env, uint32_t exit_code,
1895 uint64_t exit_info_1, uintptr_t retaddr);
1896 void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1);
1897
1898 /* seg_helper.c */
1899 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
1900
1901 /* smm_helper.c */
1902 void do_smm_enter(X86CPU *cpu);
1903
1904 /* apic.c */
1905 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
1906 void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
1907 TPRAccess access);
1908
1910 /* Change the value of a KVM-specific default
1911 *
1912 * If value is NULL, no default will be set and the original
1913 * value from the CPU model table will be kept.
1914 *
1915 * It is valid to call this function only for properties that
1916 * are already present in the kvm_default_props table.
1917 */
1918 void x86_cpu_change_kvm_default(const char *prop, const char *value);
1919
1920 /* Return name of 32-bit register, from a R_* constant */
1921 const char *get_register_name_32(unsigned int reg);
1922
1923 void enable_compat_apic_id_mode(void);
1924
1925 #define APIC_DEFAULT_ADDRESS 0xfee00000
1926 #define APIC_SPACE_SIZE 0x100000
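/* i.e. the local APIC MMIO window is [0xfee00000, 0xfeefffff], one MiB:
 * 0xfee00000 + 0x100000 - 1 == 0xfeefffff. */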
1927
1928 void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);
1929
1930 /* cpu.c */
1931 bool cpu_is_bsp(X86CPU *cpu);
1932
1933 void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
1934 void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
1935 void x86_update_hflags(CPUX86State *env);
1936
1937 #endif /* I386_CPU_H */