#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
#define HF_UMIP_SHIFT 27 /* CR4.UMIP */
+#define HF_AVX_EN_SHIFT 28 /* AVX Enabled (CR4+XCR0) */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
#define HF_UMIP_MASK (1 << HF_UMIP_SHIFT)
+#define HF_AVX_EN_MASK (1 << HF_AVX_EN_SHIFT)
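The new HF_AVX_EN bit caches "AVX is currently usable" in hflags, so translated code can test a single bit instead of re-deriving the answer from CR4.OSXSAVE and XCR0 on every vector instruction. A minimal sketch of such a test, with a hypothetical helper name:

static inline bool avx_usable_sketch(CPUX86State *env)
{
    /* Illustrative only: the cached bit stands in for the full
     * CR4.OSXSAVE/XCR0 check (see cpu_sync_avx_hflag() below). */
    return (env->hflags & HF_AVX_EN_MASK) != 0;
}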
/* hflags2 */
#define HYPERV_FEAT_STIMER_DIRECT 14
#define HYPERV_FEAT_AVIC 15
#define HYPERV_FEAT_SYNDBG 16
+#define HYPERV_FEAT_MSR_BITMAP 17
+#define HYPERV_FEAT_XMM_INPUT 18
+#define HYPERV_FEAT_TLBFLUSH_EXT 19
+#define HYPERV_FEAT_TLBFLUSH_DIRECT 20
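The four new feature bits extend the Hyper-V enlightenment bitmap and are queried through the existing hyperv_feat_enabled() helper declared later in this header. A sketch of a call site (the surrounding function is hypothetical):

static bool want_direct_tlbflush_sketch(X86CPU *cpu)
{
    /* Gate an optional enlightenment on the new feature bit. */
    return hyperv_feat_enabled(cpu, HYPERV_FEAT_TLBFLUSH_DIRECT);
}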
#ifndef HYPERV_SPINLOCK_NEVER_NOTIFY
#define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF
uint32_t flags;
} SegmentCache;
-#define MMREG_UNION(n, bits) \
- union n { \
- uint8_t _b_##n[(bits)/8]; \
- uint16_t _w_##n[(bits)/16]; \
- uint32_t _l_##n[(bits)/32]; \
- uint64_t _q_##n[(bits)/64]; \
- float32 _s_##n[(bits)/32]; \
- float64 _d_##n[(bits)/64]; \
- }
-
-typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
-typedef MMREG_UNION(MMXReg, 64) MMXReg;
+typedef union MMXReg { /* 64-bit MMX register */
+ uint8_t _b_MMXReg[64 / 8];
+ uint16_t _w_MMXReg[64 / 16];
+ uint32_t _l_MMXReg[64 / 32];
+ uint64_t _q_MMXReg[64 / 64];
+ float32 _s_MMXReg[64 / 32];
+ float64 _d_MMXReg[64 / 64];
+} MMXReg;
+
+typedef union XMMReg { /* 128-bit SSE register */
+ uint64_t _q_XMMReg[128 / 64];
+} XMMReg;
+
+typedef union YMMReg { /* 256-bit AVX register, two XMM lanes */
+ uint64_t _q_YMMReg[256 / 64];
+ XMMReg _x_YMMReg[256 / 128];
+} YMMReg;
+
+typedef union ZMMReg { /* 512-bit AVX-512 register */
+ uint8_t _b_ZMMReg[512 / 8];
+ uint16_t _w_ZMMReg[512 / 16];
+ uint32_t _l_ZMMReg[512 / 32];
+ uint64_t _q_ZMMReg[512 / 64];
+ float16 _h_ZMMReg[512 / 16];
+ float32 _s_ZMMReg[512 / 32];
+ float64 _d_ZMMReg[512 / 64];
+ XMMReg _x_ZMMReg[512 / 128];
+ YMMReg _y_ZMMReg[512 / 256];
+} ZMMReg;
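Spelling the unions out instead of generating them with MMREG_UNION makes the member names greppable and lets XMMReg and YMMReg exist as standalone types: ZMMReg embeds them, so one 512-bit register can also be addressed as four 128-bit or two 256-bit lanes. A hypothetical illustration of that aliasing (raw member access, little-endian element order assumed; the accessor macros below hide endianness):

static inline uint64_t zmm_low_qword_sketch(const ZMMReg *r)
{
    /* Low 128-bit lane viewed as an XMMReg, then its low qword. */
    return r->_x_ZMMReg[0]._q_XMMReg[0];
}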
typedef struct BNDReg {
uint64_t lb;
#define ZMM_B(n) _b_ZMMReg[63 - (n)]
#define ZMM_W(n) _w_ZMMReg[31 - (n)]
#define ZMM_L(n) _l_ZMMReg[15 - (n)]
+#define ZMM_H(n) _h_ZMMReg[31 - (n)]
#define ZMM_S(n) _s_ZMMReg[15 - (n)]
#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
#define ZMM_D(n) _d_ZMMReg[7 - (n)]
+#define ZMM_X(n) _x_ZMMReg[3 - (n)]
+#define ZMM_Y(n) _y_ZMMReg[1 - (n)]
+
+#define XMM_Q(n) _q_XMMReg[1 - (n)]
+
+#define YMM_Q(n) _q_YMMReg[3 - (n)]
+#define YMM_X(n) _x_YMMReg[1 - (n)]
#define MMX_B(n) _b_MMXReg[7 - (n)]
#define MMX_W(n) _w_MMXReg[3 - (n)]
#define ZMM_B(n) _b_ZMMReg[n]
#define ZMM_W(n) _w_ZMMReg[n]
#define ZMM_L(n) _l_ZMMReg[n]
+#define ZMM_H(n) _h_ZMMReg[n]
#define ZMM_S(n) _s_ZMMReg[n]
#define ZMM_Q(n) _q_ZMMReg[n]
#define ZMM_D(n) _d_ZMMReg[n]
+#define ZMM_X(n) _x_ZMMReg[n]
+#define ZMM_Y(n) _y_ZMMReg[n]
+
+#define XMM_Q(n) _q_XMMReg[n]
+
+#define YMM_Q(n) _q_YMMReg[n]
+#define YMM_X(n) _x_YMMReg[n]
#define MMX_B(n) _b_MMXReg[n]
#define MMX_W(n) _w_MMXReg[n]
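The two macro sets keep guest element numbering stable across host endianness: on big-endian hosts the backing arrays hold elements in reverse order, so the index is flipped, and callers always write ZMM_Q(0) for the architecturally lowest qword. For example, a hypothetical helper copying the low 128 bits of one vector register into another:

static inline void copy_low_xmm_sketch(ZMMReg *d, const ZMMReg *s)
{
    /* Endian-neutral: ZMM_Q() expands to the right _q_ZMMReg[]
     * index on both big- and little-endian hosts. */
    d->ZMM_Q(0) = s->ZMM_Q(0);
    d->ZMM_Q(1) = s->ZMM_Q(1);
}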
float_status mmx_status; /* for 3DNow! float ops */
float_status sse_status;
uint32_t mxcsr;
- ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
- ZMMReg xmm_t0;
+ ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32] QEMU_ALIGNED(16);
+ ZMMReg xmm_t0 QEMU_ALIGNED(16);
MMXReg mmx_t0;
uint64_t opmask_regs[NB_OPMASK_REGS];
uint8_t has_error_code;
uint8_t exception_has_payload;
uint64_t exception_payload;
+ uint8_t triple_fault_pending;
uint32_t ins_len;
uint32_t sipi_vector;
bool tsc_valid;
uint32_t hyperv_vendor_id[3];
uint32_t hyperv_interface_id[4];
uint32_t hyperv_limits[3];
- uint32_t hyperv_nested[4];
bool hyperv_enforce_cpuid;
uint32_t hyperv_ver_id_build;
uint16_t hyperv_ver_id_major;
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
+ int cpuid, DumpState *s);
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
+ int cpuid, DumpState *s);
int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
+ DumpState *s);
int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
+ DumpState *s);
void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
Error **errp);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr);
void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr);
+void cpu_x86_xsave(CPUX86State *s, target_ulong ptr);
+void cpu_x86_xrstor(CPUX86State *s, target_ulong ptr);
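The new cpu_x86_xsave()/cpu_x86_xrstor() pair mirrors the fxsave/fxrstor helpers above for the XSAVE-format area, presumably for user-mode emulation when saving and restoring signal-frame FPU state. A hypothetical call site (fpstate_addr is illustrative):

static void save_fpstate_sketch(CPUX86State *env, target_ulong fpstate_addr)
{
    cpu_x86_xsave(env, fpstate_addr); /* dump the XSAVE area to guest memory */
}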
/* cpu.c */
void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
} PropValue;
void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
+void x86_cpu_after_reset(X86CPU *cpu);
+
uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);
/* cpu.c other functions (cpuid) */
/* helper.c */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
+void cpu_sync_avx_hflag(CPUX86State *env);
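A sketch of what cpu_sync_avx_hflag() presumably computes, assuming the CR4_OSXSAVE_MASK and XSTATE_SSE_MASK/XSTATE_YMM_MASK constants defined elsewhere in this header (not necessarily the exact implementation):

void cpu_sync_avx_hflag(CPUX86State *env)
{
    /* AVX is usable only when the OS enabled XSAVE and XCR0 enables
     * both the SSE and YMM state components. */
    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && ((env->xcr0 & (XSTATE_SSE_MASK | XSTATE_YMM_MASK))
            == (XSTATE_SSE_MASK | XSTATE_YMM_MASK))) {
        env->hflags |= HF_AVX_EN_MASK;
    } else {
        env->hflags &= ~HF_AVX_EN_MASK;
    }
}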
#ifndef CONFIG_USER_ONLY
static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
#define MMU_KSMAP_IDX 0
#define MMU_USER_IDX 1
#define MMU_KNOSMAP_IDX 2
+#define MMU_NESTED_IDX 3
+#define MMU_PHYS_IDX 4
+
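MMU_NESTED_IDX and MMU_PHYS_IDX presumably give the softmmu separate TLBs for nested-guest translations and raw physical accesses, so a page-table walker can read PTEs without going through the current CPL's translation regime. A sketch using the generic load helper from exec/cpu_ldst.h (the walker function itself is hypothetical):

static inline uint64_t ptw_ldq_sketch(CPUX86State *env, target_ulong addr)
{
    /* Load a PTE through the physical index, bypassing CPL/SMAP modes. */
    return cpu_ldq_le_mmuidx_ra(env, addr, MMU_PHYS_IDX, 0);
}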
static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
{
return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen);
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen);
+uint32_t xsave_area_size(uint64_t mask, bool compacted);
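Exporting xsave_area_size() lets callers size an XSAVE buffer for a given feature mask in either the standard or compacted layout. A hypothetical caller:

static uint32_t guest_xsave_len_sketch(CPUX86State *env)
{
    return xsave_area_size(env->xcr0, false); /* standard, non-compacted format */
}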
void x86_update_hflags(CPUX86State* env);
static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
}
-hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
- int *prot);
#if defined(TARGET_X86_64) && \
defined(CONFIG_USER_ONLY) && \
defined(CONFIG_LINUX)