#define X86_OP_ENTRY0(op, ...) \
X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)
#define cpuid(feat) .cpuid = X86_FEAT_##feat,
-#define i64 .special = X86_SPECIAL_i64,
-#define o64 .special = X86_SPECIAL_o64,
#define xchg .special = X86_SPECIAL_Locked,
#define mmx .special = X86_SPECIAL_MMX,
#define zext0 .special = X86_SPECIAL_ZExtOp0,
#define vex12 .vex_class = 12,
#define vex13 .vex_class = 13,
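+/*
+ * Decode-time checks and SVM intercepts, stored in the "check" and
+ * "intercept" fields of X86OpEntry.
+ */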
+#define chk(a) .check = X86_CHECK_##a,
+#define svm(a) .intercept = SVM_EXIT_##a,
+
#define avx2_256 .vex_special = X86_VEX_AVX2_256,
#define P_00 1
};
static const X86OpEntry group15_mem[8] = {
- [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5),
- [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5),
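+ /* The VEX forms (VLDMXCSR/VSTMXCSR) are only defined with VEX.L=0. */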
+ [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5 chk(VEX128)),
+ [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5 chk(VEX128)),
};
uint8_t modrm = get_modrm(s, env);
if (s->flags & HF_EM_MASK) {
goto illegal;
}
+
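+ /* chk(VEX128): the instruction is only valid with VEX.L=0 (128-bit form). */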
+ if (e->check & X86_CHECK_VEX128) {
+ if (s->vex_l) {
+ goto illegal;
+ }
+ }
return true;
nm_exception:
gen_exception(s, EXCP07_PREX);
return false;
illegal:
gen_illegal_opcode(s);
return false;
}
+ /* Checks that result in #UD come first. */
+ if (decode.e.check) {
+ if (decode.e.check & X86_CHECK_i64) {
+ if (CODE64(s)) {
+ goto illegal_op;
+ }
+ }
+ if (decode.e.check & X86_CHECK_o64) {
+ if (!CODE64(s)) {
+ goto illegal_op;
+ }
+ }
+ if (decode.e.check & X86_CHECK_prot) {
+ if (!PE(s) || VM86(s)) {
+ goto illegal_op;
+ }
+ }
+ }
+
switch (decode.e.special) {
case X86_SPECIAL_None:
break;
}
break;
- case X86_SPECIAL_ProtMode:
- if (!PE(s) || VM86(s)) {
- goto illegal_op;
- }
- break;
-
- case X86_SPECIAL_i64:
- if (CODE64(s)) {
- goto illegal_op;
- }
- break;
- case X86_SPECIAL_o64:
- if (!CODE64(s)) {
- goto illegal_op;
- }
- break;
-
case X86_SPECIAL_ZExtOp0:
assert(decode.op[0].unit == X86_OP_INT);
if (!decode.op[0].has_ea) {
if (!validate_vex(s, &decode)) {
return;
}
+
+ /*
+ * Checks that result in #GP or VMEXIT come second. Intercepts are
+ * generally checked after non-memory exceptions (i.e. before all
+ * exceptions if there is no memory operand). The exceptions to this
+ * rule are the vm86 checks (INTn, IRET, PUSHF/POPF), RSM and XSETBV (!).
+ *
+ * RSM and XSETBV will be handled in the gen_* functions
+ * instead of using chk().
+ */
+ if (decode.e.check & X86_CHECK_cpl0) {
+ if (CPL(s) != 0) {
+ goto gp_fault;
+ }
+ }
+ if (decode.e.intercept && unlikely(GUEST(s))) {
+ gen_helper_svm_check_intercept(tcg_env,
+ tcg_constant_i32(decode.e.intercept));
+ }
+ if (decode.e.check) {
+ if ((decode.e.check & X86_CHECK_vm86_iopl) && VM86(s)) {
+ if (IOPL(s) < 3) {
+ goto gp_fault;
+ }
+ } else if (decode.e.check & X86_CHECK_cpl_iopl) {
+ if (IOPL(s) < CPL(s)) {
+ goto gp_fault;
+ }
+ }
+ }
+
if (decode.e.special == X86_SPECIAL_MMX &&
!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) {
gen_helper_enter_mmx(tcg_env);
gen_writeback(s, &decode, 0, s->T0);
}
return;
+ gp_fault:
+ gen_exception_gpf(s);
+ return;
illegal_op:
gen_illegal_opcode(s);
return;
X86_OP_MMX, /* address in either s->ptrX or s->A0 depending on has_ea */
} X86OpUnit;
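+/*
+ * Checks performed by the common decoding code, requested for each
+ * table entry through the chk() macro.
+ */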
+typedef enum X86InsnCheck {
+ /* i64: illegal in 64-bit mode; o64: only valid in 64-bit mode */
+ X86_CHECK_i64 = 1,
+ X86_CHECK_o64 = 2,
+
+ /* Fault outside protected mode */
+ X86_CHECK_prot = 4,
+
+ /* Privileged instruction checks */
+ X86_CHECK_cpl0 = 8,
+ X86_CHECK_vm86_iopl = 16,
+ X86_CHECK_cpl_iopl = 32,
+ X86_CHECK_iopl = X86_CHECK_cpl_iopl | X86_CHECK_vm86_iopl,
+
+ /* Fault if VEX.L=1 */
+ X86_CHECK_VEX128 = 64,
+} X86InsnCheck;
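+
+/*
+ * A hypothetical example: an entry for an instruction that must run at
+ * CPL 0 and is intercepted by SVM could be declared with
+ * "chk(cpl0) svm(WBINVD)", i.e. .check = X86_CHECK_cpl0 and
+ * .intercept = SVM_EXIT_WBINVD.
+ */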
+
typedef enum X86InsnSpecial {
X86_SPECIAL_None,
/* Always locked if it has a memory operand (XCHG) */
X86_SPECIAL_Locked,
- /* Fault outside protected mode */
- X86_SPECIAL_ProtMode,
-
/*
* Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw
* in the manual.
* become P/P/Q/N, and size "x" becomes "q".
*/
X86_SPECIAL_MMX,
-
- /* Illegal or exclusive to 64-bit mode */
- X86_SPECIAL_i64,
- X86_SPECIAL_o64,
} X86InsnSpecial;
/*
X86CPUIDFeature cpuid:8;
unsigned vex_class:8;
X86VEXSpecial vex_special:8;
- uint16_t valid_prefix:16;
+ unsigned valid_prefix:16;
+ unsigned check:16; /* X86InsnCheck flags */
+ unsigned intercept:8; /* SVM_EXIT_* code; 0 means no intercept */
bool is_decode:1;
};