1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
5 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
7 * Copyright (c) 2005 Keir Fraser
9 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
10 * privileged instructions:
12 * Copyright (C) 2006 Qumranet
13 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
15 * Avi Kivity <avi@qumranet.com>
16 * Yaniv Kamay <yaniv@qumranet.com>
18 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21 #include <linux/kvm_host.h>
22 #include "kvm_cache_regs.h"
23 #include "kvm_emulate.h"
24 #include <linux/stringify.h>
25 #include <asm/debugreg.h>
26 #include <asm/nospec-branch.h>
37 #define OpImplicit 1ull /* No generic decode */
38 #define OpReg 2ull /* Register */
39 #define OpMem 3ull /* Memory */
40 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
41 #define OpDI 5ull /* ES:DI/EDI/RDI */
42 #define OpMem64 6ull /* Memory, 64-bit */
43 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
44 #define OpDX 8ull /* DX register */
45 #define OpCL 9ull /* CL register (for shifts) */
46 #define OpImmByte 10ull /* 8-bit sign extended immediate */
47 #define OpOne 11ull /* Implied 1 */
48 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
49 #define OpMem16 13ull /* Memory operand (16-bit). */
50 #define OpMem32 14ull /* Memory operand (32-bit). */
51 #define OpImmU 15ull /* Immediate operand, zero extended */
52 #define OpSI 16ull /* SI/ESI/RSI */
53 #define OpImmFAddr 17ull /* Immediate far address */
54 #define OpMemFAddr 18ull /* Far address in memory */
55 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
56 #define OpES 20ull /* ES */
57 #define OpCS 21ull /* CS */
58 #define OpSS 22ull /* SS */
59 #define OpDS 23ull /* DS */
60 #define OpFS 24ull /* FS */
61 #define OpGS 25ull /* GS */
62 #define OpMem8 26ull /* 8-bit zero extended memory operand */
63 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
64 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
65 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
66 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
68 #define OpBits 5 /* Width of operand field */
69 #define OpMask ((1ull << OpBits) - 1)
72 * Opcode effective-address decode tables.
73 * Note that we only emulate instructions that have at least one memory
74 * operand (excluding implicit stack references). We assume that stack
75 * references and instruction fetches will never occur in special memory
76 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
80 /* Operand sizes: 8-bit operands or specified/overridden size. */
81 #define ByteOp (1<<0) /* 8-bit operands. */
82 /* Destination operand type. */
84 #define ImplicitOps (OpImplicit << DstShift)
85 #define DstReg (OpReg << DstShift)
86 #define DstMem (OpMem << DstShift)
87 #define DstAcc (OpAcc << DstShift)
88 #define DstDI (OpDI << DstShift)
89 #define DstMem64 (OpMem64 << DstShift)
90 #define DstMem16 (OpMem16 << DstShift)
91 #define DstImmUByte (OpImmUByte << DstShift)
92 #define DstDX (OpDX << DstShift)
93 #define DstAccLo (OpAccLo << DstShift)
94 #define DstMask (OpMask << DstShift)
95 /* Source operand type. */
97 #define SrcNone (OpNone << SrcShift)
98 #define SrcReg (OpReg << SrcShift)
99 #define SrcMem (OpMem << SrcShift)
100 #define SrcMem16 (OpMem16 << SrcShift)
101 #define SrcMem32 (OpMem32 << SrcShift)
102 #define SrcImm (OpImm << SrcShift)
103 #define SrcImmByte (OpImmByte << SrcShift)
104 #define SrcOne (OpOne << SrcShift)
105 #define SrcImmUByte (OpImmUByte << SrcShift)
106 #define SrcImmU (OpImmU << SrcShift)
107 #define SrcSI (OpSI << SrcShift)
108 #define SrcXLat (OpXLat << SrcShift)
109 #define SrcImmFAddr (OpImmFAddr << SrcShift)
110 #define SrcMemFAddr (OpMemFAddr << SrcShift)
111 #define SrcAcc (OpAcc << SrcShift)
112 #define SrcImmU16 (OpImmU16 << SrcShift)
113 #define SrcImm64 (OpImm64 << SrcShift)
114 #define SrcDX (OpDX << SrcShift)
115 #define SrcMem8 (OpMem8 << SrcShift)
116 #define SrcAccHi (OpAccHi << SrcShift)
117 #define SrcMask (OpMask << SrcShift)
118 #define BitOp (1<<11)
119 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
120 #define String (1<<13) /* String instruction (rep capable) */
121 #define Stack (1<<14) /* Stack instruction (push/pop) */
122 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
123 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
124 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
125 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
126 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
127 #define Escape (5<<15) /* Escape to coprocessor instruction */
128 #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
129 #define ModeDual (7<<15) /* Different instruction for 32/64 bit */
130 #define Sse (1<<18) /* SSE Vector instruction */
131 /* Generic ModRM decode. */
132 #define ModRM (1<<19)
133 /* Destination is only written; never read. */
136 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
137 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
138 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
139 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
140 #define Undefined (1<<25) /* No Such Instruction */
141 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
142 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
144 #define PageTable (1 << 29) /* instruction used to write page table */
145 #define NotImpl (1 << 30) /* instruction is not implemented */
146 /* Source 2 operand type */
147 #define Src2Shift (31)
148 #define Src2None (OpNone << Src2Shift)
149 #define Src2Mem (OpMem << Src2Shift)
150 #define Src2CL (OpCL << Src2Shift)
151 #define Src2ImmByte (OpImmByte << Src2Shift)
152 #define Src2One (OpOne << Src2Shift)
153 #define Src2Imm (OpImm << Src2Shift)
154 #define Src2ES (OpES << Src2Shift)
155 #define Src2CS (OpCS << Src2Shift)
156 #define Src2SS (OpSS << Src2Shift)
157 #define Src2DS (OpDS << Src2Shift)
158 #define Src2FS (OpFS << Src2Shift)
159 #define Src2GS (OpGS << Src2Shift)
160 #define Src2Mask (OpMask << Src2Shift)
161 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
162 #define AlignMask ((u64)7 << 41)
163 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
164 #define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
165 #define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
166 #define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
167 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
168 #define NoWrite ((u64)1 << 45) /* No writeback */
169 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
170 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
171 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
172 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
173 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
174 #define NearBranch ((u64)1 << 52) /* Near branches */
175 #define No16 ((u64)1 << 53) /* No 16 bit operand */
176 #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
177 #define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operand */
178 #define IsBranch ((u64)1 << 56) /* Instruction is considered a branch. */
180 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
182 #define X2(x...) x, x
183 #define X3(x...) X2(x), x
184 #define X4(x...) X2(x), X2(x)
185 #define X5(x...) X4(x), x
186 #define X6(x...) X4(x), X2(x)
187 #define X7(x...) X4(x), X3(x)
188 #define X8(x...) X4(x), X4(x)
189 #define X16(x...) X8(x), X8(x)
191 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
192 #define FASTOP_SIZE 8
199 int (*execute
)(struct x86_emulate_ctxt
*ctxt
);
200 const struct opcode
*group
;
201 const struct group_dual
*gdual
;
202 const struct gprefix
*gprefix
;
203 const struct escape
*esc
;
204 const struct instr_dual
*idual
;
205 const struct mode_dual
*mdual
;
206 void (*fastop
)(struct fastop
*fake
);
208 int (*check_perm
)(struct x86_emulate_ctxt
*ctxt
);
212 struct opcode mod012
[8];
213 struct opcode mod3
[8];
217 struct opcode pfx_no
;
218 struct opcode pfx_66
;
219 struct opcode pfx_f2
;
220 struct opcode pfx_f3
;
225 struct opcode high
[64];
229 struct opcode mod012
;
234 struct opcode mode32
;
235 struct opcode mode64
;
238 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
240 enum x86_transfer_type
{
242 X86_TRANSFER_CALL_JMP
,
244 X86_TRANSFER_TASK_SWITCH
,
247 static ulong
reg_read(struct x86_emulate_ctxt
*ctxt
, unsigned nr
)
249 if (!(ctxt
->regs_valid
& (1 << nr
))) {
250 ctxt
->regs_valid
|= 1 << nr
;
251 ctxt
->_regs
[nr
] = ctxt
->ops
->read_gpr(ctxt
, nr
);
253 return ctxt
->_regs
[nr
];
256 static ulong
*reg_write(struct x86_emulate_ctxt
*ctxt
, unsigned nr
)
258 ctxt
->regs_valid
|= 1 << nr
;
259 ctxt
->regs_dirty
|= 1 << nr
;
260 return &ctxt
->_regs
[nr
];
263 static ulong
*reg_rmw(struct x86_emulate_ctxt
*ctxt
, unsigned nr
)
266 return reg_write(ctxt
, nr
);
269 static void writeback_registers(struct x86_emulate_ctxt
*ctxt
)
273 for_each_set_bit(reg
, (ulong
*)&ctxt
->regs_dirty
, 16)
274 ctxt
->ops
->write_gpr(ctxt
, reg
, ctxt
->_regs
[reg
]);
277 static void invalidate_registers(struct x86_emulate_ctxt
*ctxt
)
279 ctxt
->regs_dirty
= 0;
280 ctxt
->regs_valid
= 0;
284 * These EFLAGS bits are restored from saved value during emulation, and
285 * any changes are written back to the saved value after emulation.
287 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
288 X86_EFLAGS_PF|X86_EFLAGS_CF)
297 * fastop functions have a special calling convention:
302 * flags: rflags (in/out)
303 * ex: rsi (in:fastop pointer, out:zero if exception)
305 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
306 * different operand sizes can be reached by calculation, rather than a jump
307 * table (which would be bigger than the code).
309 static int fastop(struct x86_emulate_ctxt
*ctxt
, fastop_t fop
);
311 #define __FOP_FUNC(name) \
312 ".align " __stringify(FASTOP_SIZE) " \n\t" \
313 ".type " name ", @function \n\t" \
316 #define FOP_FUNC(name) \
319 #define __FOP_RET(name) \
321 ".size " name ", .-" name "\n\t"
323 #define FOP_RET(name) \
326 #define FOP_START(op) \
327 extern void em_##op(struct fastop *fake); \
328 asm(".pushsection .text, \"ax\" \n\t" \
329 ".global em_" #op " \n\t" \
330 ".align " __stringify(FASTOP_SIZE) " \n\t" \
336 #define __FOPNOP(name) \
341 __FOPNOP(__stringify(__UNIQUE_ID(nop)))
343 #define FOP1E(op, dst) \
344 __FOP_FUNC(#op "_" #dst) \
345 "10: " #op " %" #dst " \n\t" \
346 __FOP_RET(#op "_" #dst)
348 #define FOP1EEX(op, dst) \
349 FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
351 #define FASTOP1(op) \
356 ON64(FOP1E(op##q, rax)) \
359 /* 1-operand, using src2 (for MUL/DIV r/m) */
360 #define FASTOP1SRC2(op, name) \
365 ON64(FOP1E(op, rcx)) \
368 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
369 #define FASTOP1SRC2EX(op, name) \
374 ON64(FOP1EEX(op, rcx)) \
377 #define FOP2E(op, dst, src) \
378 __FOP_FUNC(#op "_" #dst "_" #src) \
379 #op " %" #src ", %" #dst " \n\t" \
380 __FOP_RET(#op "_" #dst "_" #src)
382 #define FASTOP2(op) \
384 FOP2E(op##b, al, dl) \
385 FOP2E(op##w, ax, dx) \
386 FOP2E(op##l, eax, edx) \
387 ON64(FOP2E(op##q, rax, rdx)) \
390 /* 2 operand, word only */
391 #define FASTOP2W(op) \
394 FOP2E(op##w, ax, dx) \
395 FOP2E(op##l, eax, edx) \
396 ON64(FOP2E(op##q, rax, rdx)) \
399 /* 2 operand, src is CL */
400 #define FASTOP2CL(op) \
402 FOP2E(op##b, al, cl) \
403 FOP2E(op##w, ax, cl) \
404 FOP2E(op##l, eax, cl) \
405 ON64(FOP2E(op##q, rax, cl)) \
408 /* 2 operand, src and dest are reversed */
409 #define FASTOP2R(op, name) \
411 FOP2E(op##b, dl, al) \
412 FOP2E(op##w, dx, ax) \
413 FOP2E(op##l, edx, eax) \
414 ON64(FOP2E(op##q, rdx, rax)) \
417 #define FOP3E(op, dst, src, src2) \
418 __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
419 #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
420 __FOP_RET(#op "_" #dst "_" #src "_" #src2)
422 /* 3-operand, word-only, src2=cl */
423 #define FASTOP3WCL(op) \
426 FOP3E(op##w, ax, dx, cl) \
427 FOP3E(op##l, eax, edx, cl) \
428 ON64(FOP3E(op##q, rax, rdx, cl)) \
431 /* Special case for SETcc - 1 instruction per cc */
432 #define FOP_SETCC(op) \
434 ".type " #op ", @function \n\t" \
460 "pushf; sbb %al, %al; popf \n\t"
465 * XXX: inoutclob user must know where the argument is being expanded.
466 * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
468 #define asm_safe(insn, inoutclob...) \
472 asm volatile("1:" insn "\n" \
474 _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
475 : [_fault] "+r"(_fault) inoutclob ); \
477 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
480 static int emulator_check_intercept(struct x86_emulate_ctxt
*ctxt
,
481 enum x86_intercept intercept
,
482 enum x86_intercept_stage stage
)
484 struct x86_instruction_info info
= {
485 .intercept
= intercept
,
486 .rep_prefix
= ctxt
->rep_prefix
,
487 .modrm_mod
= ctxt
->modrm_mod
,
488 .modrm_reg
= ctxt
->modrm_reg
,
489 .modrm_rm
= ctxt
->modrm_rm
,
490 .src_val
= ctxt
->src
.val64
,
491 .dst_val
= ctxt
->dst
.val64
,
492 .src_bytes
= ctxt
->src
.bytes
,
493 .dst_bytes
= ctxt
->dst
.bytes
,
494 .ad_bytes
= ctxt
->ad_bytes
,
495 .next_rip
= ctxt
->eip
,
498 return ctxt
->ops
->intercept(ctxt
, &info
, stage
);
501 static void assign_masked(ulong
*dest
, ulong src
, ulong mask
)
503 *dest
= (*dest
& ~mask
) | (src
& mask
);
506 static void assign_register(unsigned long *reg
, u64 val
, int bytes
)
508 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
511 *(u8
*)reg
= (u8
)val
;
514 *(u16
*)reg
= (u16
)val
;
518 break; /* 64b: zero-extend */
525 static inline unsigned long ad_mask(struct x86_emulate_ctxt
*ctxt
)
527 return (1UL << (ctxt
->ad_bytes
<< 3)) - 1;
530 static ulong
stack_mask(struct x86_emulate_ctxt
*ctxt
)
533 struct desc_struct ss
;
535 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
537 ctxt
->ops
->get_segment(ctxt
, &sel
, &ss
, NULL
, VCPU_SREG_SS
);
538 return ~0U >> ((ss
.d
^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
541 static int stack_size(struct x86_emulate_ctxt
*ctxt
)
543 return (__fls(stack_mask(ctxt
)) + 1) >> 3;
546 /* Access/update address held in a register, based on addressing mode. */
547 static inline unsigned long
548 address_mask(struct x86_emulate_ctxt
*ctxt
, unsigned long reg
)
550 if (ctxt
->ad_bytes
== sizeof(unsigned long))
553 return reg
& ad_mask(ctxt
);
556 static inline unsigned long
557 register_address(struct x86_emulate_ctxt
*ctxt
, int reg
)
559 return address_mask(ctxt
, reg_read(ctxt
, reg
));
562 static void masked_increment(ulong
*reg
, ulong mask
, int inc
)
564 assign_masked(reg
, *reg
+ inc
, mask
);
568 register_address_increment(struct x86_emulate_ctxt
*ctxt
, int reg
, int inc
)
570 ulong
*preg
= reg_rmw(ctxt
, reg
);
572 assign_register(preg
, *preg
+ inc
, ctxt
->ad_bytes
);
575 static void rsp_increment(struct x86_emulate_ctxt
*ctxt
, int inc
)
577 masked_increment(reg_rmw(ctxt
, VCPU_REGS_RSP
), stack_mask(ctxt
), inc
);
580 static u32
desc_limit_scaled(struct desc_struct
*desc
)
582 u32 limit
= get_desc_limit(desc
);
584 return desc
->g
? (limit
<< 12) | 0xfff : limit
;
587 static unsigned long seg_base(struct x86_emulate_ctxt
*ctxt
, int seg
)
589 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&& seg
< VCPU_SREG_FS
)
592 return ctxt
->ops
->get_cached_segment_base(ctxt
, seg
);
595 static int emulate_exception(struct x86_emulate_ctxt
*ctxt
, int vec
,
596 u32 error
, bool valid
)
599 ctxt
->exception
.vector
= vec
;
600 ctxt
->exception
.error_code
= error
;
601 ctxt
->exception
.error_code_valid
= valid
;
602 return X86EMUL_PROPAGATE_FAULT
;
605 static int emulate_db(struct x86_emulate_ctxt
*ctxt
)
607 return emulate_exception(ctxt
, DB_VECTOR
, 0, false);
610 static int emulate_gp(struct x86_emulate_ctxt
*ctxt
, int err
)
612 return emulate_exception(ctxt
, GP_VECTOR
, err
, true);
615 static int emulate_ss(struct x86_emulate_ctxt
*ctxt
, int err
)
617 return emulate_exception(ctxt
, SS_VECTOR
, err
, true);
620 static int emulate_ud(struct x86_emulate_ctxt
*ctxt
)
622 return emulate_exception(ctxt
, UD_VECTOR
, 0, false);
625 static int emulate_ts(struct x86_emulate_ctxt
*ctxt
, int err
)
627 return emulate_exception(ctxt
, TS_VECTOR
, err
, true);
630 static int emulate_de(struct x86_emulate_ctxt
*ctxt
)
632 return emulate_exception(ctxt
, DE_VECTOR
, 0, false);
635 static int emulate_nm(struct x86_emulate_ctxt
*ctxt
)
637 return emulate_exception(ctxt
, NM_VECTOR
, 0, false);
640 static u16
get_segment_selector(struct x86_emulate_ctxt
*ctxt
, unsigned seg
)
643 struct desc_struct desc
;
645 ctxt
->ops
->get_segment(ctxt
, &selector
, &desc
, NULL
, seg
);
649 static void set_segment_selector(struct x86_emulate_ctxt
*ctxt
, u16 selector
,
654 struct desc_struct desc
;
656 ctxt
->ops
->get_segment(ctxt
, &dummy
, &desc
, &base3
, seg
);
657 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, base3
, seg
);
660 static inline u8
ctxt_virt_addr_bits(struct x86_emulate_ctxt
*ctxt
)
662 return (ctxt
->ops
->get_cr(ctxt
, 4) & X86_CR4_LA57
) ? 57 : 48;
665 static inline bool emul_is_noncanonical_address(u64 la
,
666 struct x86_emulate_ctxt
*ctxt
)
668 return get_canonical(la
, ctxt_virt_addr_bits(ctxt
)) != la
;
672 * x86 defines three classes of vector instructions: explicitly
673 * aligned, explicitly unaligned, and the rest, which change behaviour
674 * depending on whether they're AVX encoded or not.
676 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
677 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
678 * 512 bytes of data must be aligned to a 16 byte boundary.
680 static unsigned insn_alignment(struct x86_emulate_ctxt
*ctxt
, unsigned size
)
682 u64 alignment
= ctxt
->d
& AlignMask
;
684 if (likely(size
< 16))
699 static __always_inline
int __linearize(struct x86_emulate_ctxt
*ctxt
,
700 struct segmented_address addr
,
701 unsigned *max_size
, unsigned size
,
702 bool write
, bool fetch
,
703 enum x86emul_mode mode
, ulong
*linear
)
705 struct desc_struct desc
;
712 la
= seg_base(ctxt
, addr
.seg
) + addr
.ea
;
715 case X86EMUL_MODE_PROT64
:
717 va_bits
= ctxt_virt_addr_bits(ctxt
);
718 if (get_canonical(la
, va_bits
) != la
)
721 *max_size
= min_t(u64
, ~0u, (1ull << va_bits
) - la
);
722 if (size
> *max_size
)
726 *linear
= la
= (u32
)la
;
727 usable
= ctxt
->ops
->get_segment(ctxt
, &sel
, &desc
, NULL
,
731 /* code segment in protected mode or read-only data segment */
732 if ((((ctxt
->mode
!= X86EMUL_MODE_REAL
) && (desc
.type
& 8))
733 || !(desc
.type
& 2)) && write
)
735 /* unreadable code segment */
736 if (!fetch
&& (desc
.type
& 8) && !(desc
.type
& 2))
738 lim
= desc_limit_scaled(&desc
);
739 if (!(desc
.type
& 8) && (desc
.type
& 4)) {
740 /* expand-down segment */
743 lim
= desc
.d
? 0xffffffff : 0xffff;
747 if (lim
== 0xffffffff)
750 *max_size
= (u64
)lim
+ 1 - addr
.ea
;
751 if (size
> *max_size
)
756 if (la
& (insn_alignment(ctxt
, size
) - 1))
757 return emulate_gp(ctxt
, 0);
758 return X86EMUL_CONTINUE
;
760 if (addr
.seg
== VCPU_SREG_SS
)
761 return emulate_ss(ctxt
, 0);
763 return emulate_gp(ctxt
, 0);
766 static int linearize(struct x86_emulate_ctxt
*ctxt
,
767 struct segmented_address addr
,
768 unsigned size
, bool write
,
772 return __linearize(ctxt
, addr
, &max_size
, size
, write
, false,
776 static inline int assign_eip(struct x86_emulate_ctxt
*ctxt
, ulong dst
,
777 enum x86emul_mode mode
)
782 struct segmented_address addr
= { .seg
= VCPU_SREG_CS
,
785 if (ctxt
->op_bytes
!= sizeof(unsigned long))
786 addr
.ea
= dst
& ((1UL << (ctxt
->op_bytes
<< 3)) - 1);
787 rc
= __linearize(ctxt
, addr
, &max_size
, 1, false, true, mode
, &linear
);
788 if (rc
== X86EMUL_CONTINUE
)
789 ctxt
->_eip
= addr
.ea
;
793 static inline int assign_eip_near(struct x86_emulate_ctxt
*ctxt
, ulong dst
)
795 return assign_eip(ctxt
, dst
, ctxt
->mode
);
798 static int assign_eip_far(struct x86_emulate_ctxt
*ctxt
, ulong dst
,
799 const struct desc_struct
*cs_desc
)
801 enum x86emul_mode mode
= ctxt
->mode
;
805 if (ctxt
->mode
>= X86EMUL_MODE_PROT16
) {
809 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
811 mode
= X86EMUL_MODE_PROT64
;
813 mode
= X86EMUL_MODE_PROT32
; /* temporary value */
816 if (mode
== X86EMUL_MODE_PROT16
|| mode
== X86EMUL_MODE_PROT32
)
817 mode
= cs_desc
->d
? X86EMUL_MODE_PROT32
: X86EMUL_MODE_PROT16
;
818 rc
= assign_eip(ctxt
, dst
, mode
);
819 if (rc
== X86EMUL_CONTINUE
)
824 static inline int jmp_rel(struct x86_emulate_ctxt
*ctxt
, int rel
)
826 return assign_eip_near(ctxt
, ctxt
->_eip
+ rel
);
829 static int linear_read_system(struct x86_emulate_ctxt
*ctxt
, ulong linear
,
830 void *data
, unsigned size
)
832 return ctxt
->ops
->read_std(ctxt
, linear
, data
, size
, &ctxt
->exception
, true);
835 static int linear_write_system(struct x86_emulate_ctxt
*ctxt
,
836 ulong linear
, void *data
,
839 return ctxt
->ops
->write_std(ctxt
, linear
, data
, size
, &ctxt
->exception
, true);
842 static int segmented_read_std(struct x86_emulate_ctxt
*ctxt
,
843 struct segmented_address addr
,
850 rc
= linearize(ctxt
, addr
, size
, false, &linear
);
851 if (rc
!= X86EMUL_CONTINUE
)
853 return ctxt
->ops
->read_std(ctxt
, linear
, data
, size
, &ctxt
->exception
, false);
856 static int segmented_write_std(struct x86_emulate_ctxt
*ctxt
,
857 struct segmented_address addr
,
864 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
865 if (rc
!= X86EMUL_CONTINUE
)
867 return ctxt
->ops
->write_std(ctxt
, linear
, data
, size
, &ctxt
->exception
, false);
871 * Prefetch the remaining bytes of the instruction without crossing page
872 * boundary if they are not in fetch_cache yet.
874 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt
*ctxt
, int op_size
)
877 unsigned size
, max_size
;
878 unsigned long linear
;
879 int cur_size
= ctxt
->fetch
.end
- ctxt
->fetch
.data
;
880 struct segmented_address addr
= { .seg
= VCPU_SREG_CS
,
881 .ea
= ctxt
->eip
+ cur_size
};
884 * We do not know exactly how many bytes will be needed, and
885 * __linearize is expensive, so fetch as much as possible. We
886 * just have to avoid going beyond the 15 byte limit, the end
887 * of the segment, or the end of the page.
889 * __linearize is called with size 0 so that it does not do any
890 * boundary check itself. Instead, we use max_size to check
893 rc
= __linearize(ctxt
, addr
, &max_size
, 0, false, true, ctxt
->mode
,
895 if (unlikely(rc
!= X86EMUL_CONTINUE
))
898 size
= min_t(unsigned, 15UL ^ cur_size
, max_size
);
899 size
= min_t(unsigned, size
, PAGE_SIZE
- offset_in_page(linear
));
902 * One instruction can only straddle two pages,
903 * and one has been loaded at the beginning of
904 * x86_decode_insn. So, if not enough bytes
905 * still, we must have hit the 15-byte boundary.
907 if (unlikely(size
< op_size
))
908 return emulate_gp(ctxt
, 0);
910 rc
= ctxt
->ops
->fetch(ctxt
, linear
, ctxt
->fetch
.end
,
911 size
, &ctxt
->exception
);
912 if (unlikely(rc
!= X86EMUL_CONTINUE
))
914 ctxt
->fetch
.end
+= size
;
915 return X86EMUL_CONTINUE
;
918 static __always_inline
int do_insn_fetch_bytes(struct x86_emulate_ctxt
*ctxt
,
921 unsigned done_size
= ctxt
->fetch
.end
- ctxt
->fetch
.ptr
;
923 if (unlikely(done_size
< size
))
924 return __do_insn_fetch_bytes(ctxt
, size
- done_size
);
926 return X86EMUL_CONTINUE
;
929 /* Fetch next part of the instruction being emulated. */
930 #define insn_fetch(_type, _ctxt) \
933 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
934 if (rc != X86EMUL_CONTINUE) \
936 ctxt->_eip += sizeof(_type); \
937 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
938 ctxt->fetch.ptr += sizeof(_type); \
942 #define insn_fetch_arr(_arr, _size, _ctxt) \
944 rc = do_insn_fetch_bytes(_ctxt, _size); \
945 if (rc != X86EMUL_CONTINUE) \
947 ctxt->_eip += (_size); \
948 memcpy(_arr, ctxt->fetch.ptr, _size); \
949 ctxt->fetch.ptr += (_size); \
953 * Given the 'reg' portion of a ModRM byte, and a register block, return a
954 * pointer into the block that addresses the relevant register.
955 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
957 static void *decode_register(struct x86_emulate_ctxt
*ctxt
, u8 modrm_reg
,
961 int highbyte_regs
= (ctxt
->rex_prefix
== 0) && byteop
;
963 if (highbyte_regs
&& modrm_reg
>= 4 && modrm_reg
< 8)
964 p
= (unsigned char *)reg_rmw(ctxt
, modrm_reg
& 3) + 1;
966 p
= reg_rmw(ctxt
, modrm_reg
);
970 static int read_descriptor(struct x86_emulate_ctxt
*ctxt
,
971 struct segmented_address addr
,
972 u16
*size
, unsigned long *address
, int op_bytes
)
979 rc
= segmented_read_std(ctxt
, addr
, size
, 2);
980 if (rc
!= X86EMUL_CONTINUE
)
983 rc
= segmented_read_std(ctxt
, addr
, address
, op_bytes
);
997 FASTOP1SRC2(mul
, mul_ex
);
998 FASTOP1SRC2(imul
, imul_ex
);
999 FASTOP1SRC2EX(div
, div_ex
);
1000 FASTOP1SRC2EX(idiv
, idiv_ex
);
1029 FASTOP2R(cmp
, cmp_r
);
1031 static int em_bsf_c(struct x86_emulate_ctxt
*ctxt
)
1033 /* If src is zero, do not writeback, but update flags */
1034 if (ctxt
->src
.val
== 0)
1035 ctxt
->dst
.type
= OP_NONE
;
1036 return fastop(ctxt
, em_bsf
);
1039 static int em_bsr_c(struct x86_emulate_ctxt
*ctxt
)
1041 /* If src is zero, do not writeback, but update flags */
1042 if (ctxt
->src
.val
== 0)
1043 ctxt
->dst
.type
= OP_NONE
;
1044 return fastop(ctxt
, em_bsr
);
1047 static __always_inline u8
test_cc(unsigned int condition
, unsigned long flags
)
1050 void (*fop
)(void) = (void *)em_setcc
+ 4 * (condition
& 0xf);
1052 flags
= (flags
& EFLAGS_MASK
) | X86_EFLAGS_IF
;
1053 asm("push %[flags]; popf; " CALL_NOSPEC
1054 : "=a"(rc
) : [thunk_target
]"r"(fop
), [flags
]"r"(flags
));
1058 static void fetch_register_operand(struct operand
*op
)
1060 switch (op
->bytes
) {
1062 op
->val
= *(u8
*)op
->addr
.reg
;
1065 op
->val
= *(u16
*)op
->addr
.reg
;
1068 op
->val
= *(u32
*)op
->addr
.reg
;
1071 op
->val
= *(u64
*)op
->addr
.reg
;
1076 static int em_fninit(struct x86_emulate_ctxt
*ctxt
)
1078 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1079 return emulate_nm(ctxt
);
1082 asm volatile("fninit");
1084 return X86EMUL_CONTINUE
;
1087 static int em_fnstcw(struct x86_emulate_ctxt
*ctxt
)
1091 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1092 return emulate_nm(ctxt
);
1095 asm volatile("fnstcw %0": "+m"(fcw
));
1098 ctxt
->dst
.val
= fcw
;
1100 return X86EMUL_CONTINUE
;
1103 static int em_fnstsw(struct x86_emulate_ctxt
*ctxt
)
1107 if (ctxt
->ops
->get_cr(ctxt
, 0) & (X86_CR0_TS
| X86_CR0_EM
))
1108 return emulate_nm(ctxt
);
1111 asm volatile("fnstsw %0": "+m"(fsw
));
1114 ctxt
->dst
.val
= fsw
;
1116 return X86EMUL_CONTINUE
;
1119 static void decode_register_operand(struct x86_emulate_ctxt
*ctxt
,
1122 unsigned reg
= ctxt
->modrm_reg
;
1124 if (!(ctxt
->d
& ModRM
))
1125 reg
= (ctxt
->b
& 7) | ((ctxt
->rex_prefix
& 1) << 3);
1127 if (ctxt
->d
& Sse
) {
1131 kvm_read_sse_reg(reg
, &op
->vec_val
);
1134 if (ctxt
->d
& Mmx
) {
1143 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
1144 op
->addr
.reg
= decode_register(ctxt
, reg
, ctxt
->d
& ByteOp
);
1146 fetch_register_operand(op
);
1147 op
->orig_val
= op
->val
;
1150 static void adjust_modrm_seg(struct x86_emulate_ctxt
*ctxt
, int base_reg
)
1152 if (base_reg
== VCPU_REGS_RSP
|| base_reg
== VCPU_REGS_RBP
)
1153 ctxt
->modrm_seg
= VCPU_SREG_SS
;
1156 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
1160 int index_reg
, base_reg
, scale
;
1161 int rc
= X86EMUL_CONTINUE
;
1164 ctxt
->modrm_reg
= ((ctxt
->rex_prefix
<< 1) & 8); /* REX.R */
1165 index_reg
= (ctxt
->rex_prefix
<< 2) & 8; /* REX.X */
1166 base_reg
= (ctxt
->rex_prefix
<< 3) & 8; /* REX.B */
1168 ctxt
->modrm_mod
= (ctxt
->modrm
& 0xc0) >> 6;
1169 ctxt
->modrm_reg
|= (ctxt
->modrm
& 0x38) >> 3;
1170 ctxt
->modrm_rm
= base_reg
| (ctxt
->modrm
& 0x07);
1171 ctxt
->modrm_seg
= VCPU_SREG_DS
;
1173 if (ctxt
->modrm_mod
== 3 || (ctxt
->d
& NoMod
)) {
1175 op
->bytes
= (ctxt
->d
& ByteOp
) ? 1 : ctxt
->op_bytes
;
1176 op
->addr
.reg
= decode_register(ctxt
, ctxt
->modrm_rm
,
1178 if (ctxt
->d
& Sse
) {
1181 op
->addr
.xmm
= ctxt
->modrm_rm
;
1182 kvm_read_sse_reg(ctxt
->modrm_rm
, &op
->vec_val
);
1185 if (ctxt
->d
& Mmx
) {
1188 op
->addr
.mm
= ctxt
->modrm_rm
& 7;
1191 fetch_register_operand(op
);
1197 if (ctxt
->ad_bytes
== 2) {
1198 unsigned bx
= reg_read(ctxt
, VCPU_REGS_RBX
);
1199 unsigned bp
= reg_read(ctxt
, VCPU_REGS_RBP
);
1200 unsigned si
= reg_read(ctxt
, VCPU_REGS_RSI
);
1201 unsigned di
= reg_read(ctxt
, VCPU_REGS_RDI
);
1203 /* 16-bit ModR/M decode. */
1204 switch (ctxt
->modrm_mod
) {
1206 if (ctxt
->modrm_rm
== 6)
1207 modrm_ea
+= insn_fetch(u16
, ctxt
);
1210 modrm_ea
+= insn_fetch(s8
, ctxt
);
1213 modrm_ea
+= insn_fetch(u16
, ctxt
);
1216 switch (ctxt
->modrm_rm
) {
1218 modrm_ea
+= bx
+ si
;
1221 modrm_ea
+= bx
+ di
;
1224 modrm_ea
+= bp
+ si
;
1227 modrm_ea
+= bp
+ di
;
1236 if (ctxt
->modrm_mod
!= 0)
1243 if (ctxt
->modrm_rm
== 2 || ctxt
->modrm_rm
== 3 ||
1244 (ctxt
->modrm_rm
== 6 && ctxt
->modrm_mod
!= 0))
1245 ctxt
->modrm_seg
= VCPU_SREG_SS
;
1246 modrm_ea
= (u16
)modrm_ea
;
1248 /* 32/64-bit ModR/M decode. */
1249 if ((ctxt
->modrm_rm
& 7) == 4) {
1250 sib
= insn_fetch(u8
, ctxt
);
1251 index_reg
|= (sib
>> 3) & 7;
1252 base_reg
|= sib
& 7;
1255 if ((base_reg
& 7) == 5 && ctxt
->modrm_mod
== 0)
1256 modrm_ea
+= insn_fetch(s32
, ctxt
);
1258 modrm_ea
+= reg_read(ctxt
, base_reg
);
1259 adjust_modrm_seg(ctxt
, base_reg
);
1260 /* Increment ESP on POP [ESP] */
1261 if ((ctxt
->d
& IncSP
) &&
1262 base_reg
== VCPU_REGS_RSP
)
1263 modrm_ea
+= ctxt
->op_bytes
;
1266 modrm_ea
+= reg_read(ctxt
, index_reg
) << scale
;
1267 } else if ((ctxt
->modrm_rm
& 7) == 5 && ctxt
->modrm_mod
== 0) {
1268 modrm_ea
+= insn_fetch(s32
, ctxt
);
1269 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
1270 ctxt
->rip_relative
= 1;
1272 base_reg
= ctxt
->modrm_rm
;
1273 modrm_ea
+= reg_read(ctxt
, base_reg
);
1274 adjust_modrm_seg(ctxt
, base_reg
);
1276 switch (ctxt
->modrm_mod
) {
1278 modrm_ea
+= insn_fetch(s8
, ctxt
);
1281 modrm_ea
+= insn_fetch(s32
, ctxt
);
1285 op
->addr
.mem
.ea
= modrm_ea
;
1286 if (ctxt
->ad_bytes
!= 8)
1287 ctxt
->memop
.addr
.mem
.ea
= (u32
)ctxt
->memop
.addr
.mem
.ea
;
1293 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
1296 int rc
= X86EMUL_CONTINUE
;
1299 switch (ctxt
->ad_bytes
) {
1301 op
->addr
.mem
.ea
= insn_fetch(u16
, ctxt
);
1304 op
->addr
.mem
.ea
= insn_fetch(u32
, ctxt
);
1307 op
->addr
.mem
.ea
= insn_fetch(u64
, ctxt
);
1314 static void fetch_bit_operand(struct x86_emulate_ctxt
*ctxt
)
1318 if (ctxt
->dst
.type
== OP_MEM
&& ctxt
->src
.type
== OP_REG
) {
1319 mask
= ~((long)ctxt
->dst
.bytes
* 8 - 1);
1321 if (ctxt
->src
.bytes
== 2)
1322 sv
= (s16
)ctxt
->src
.val
& (s16
)mask
;
1323 else if (ctxt
->src
.bytes
== 4)
1324 sv
= (s32
)ctxt
->src
.val
& (s32
)mask
;
1326 sv
= (s64
)ctxt
->src
.val
& (s64
)mask
;
1328 ctxt
->dst
.addr
.mem
.ea
= address_mask(ctxt
,
1329 ctxt
->dst
.addr
.mem
.ea
+ (sv
>> 3));
1332 /* only subword offset */
1333 ctxt
->src
.val
&= (ctxt
->dst
.bytes
<< 3) - 1;
1336 static int read_emulated(struct x86_emulate_ctxt
*ctxt
,
1337 unsigned long addr
, void *dest
, unsigned size
)
1340 struct read_cache
*mc
= &ctxt
->mem_read
;
1342 if (mc
->pos
< mc
->end
)
1345 WARN_ON((mc
->end
+ size
) >= sizeof(mc
->data
));
1347 rc
= ctxt
->ops
->read_emulated(ctxt
, addr
, mc
->data
+ mc
->end
, size
,
1349 if (rc
!= X86EMUL_CONTINUE
)
1355 memcpy(dest
, mc
->data
+ mc
->pos
, size
);
1357 return X86EMUL_CONTINUE
;
1360 static int segmented_read(struct x86_emulate_ctxt
*ctxt
,
1361 struct segmented_address addr
,
1368 rc
= linearize(ctxt
, addr
, size
, false, &linear
);
1369 if (rc
!= X86EMUL_CONTINUE
)
1371 return read_emulated(ctxt
, linear
, data
, size
);
1374 static int segmented_write(struct x86_emulate_ctxt
*ctxt
,
1375 struct segmented_address addr
,
1382 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1383 if (rc
!= X86EMUL_CONTINUE
)
1385 return ctxt
->ops
->write_emulated(ctxt
, linear
, data
, size
,
1389 static int segmented_cmpxchg(struct x86_emulate_ctxt
*ctxt
,
1390 struct segmented_address addr
,
1391 const void *orig_data
, const void *data
,
1397 rc
= linearize(ctxt
, addr
, size
, true, &linear
);
1398 if (rc
!= X86EMUL_CONTINUE
)
1400 return ctxt
->ops
->cmpxchg_emulated(ctxt
, linear
, orig_data
, data
,
1401 size
, &ctxt
->exception
);
1404 static int pio_in_emulated(struct x86_emulate_ctxt
*ctxt
,
1405 unsigned int size
, unsigned short port
,
1408 struct read_cache
*rc
= &ctxt
->io_read
;
1410 if (rc
->pos
== rc
->end
) { /* refill pio read ahead */
1411 unsigned int in_page
, n
;
1412 unsigned int count
= ctxt
->rep_prefix
?
1413 address_mask(ctxt
, reg_read(ctxt
, VCPU_REGS_RCX
)) : 1;
1414 in_page
= (ctxt
->eflags
& X86_EFLAGS_DF
) ?
1415 offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
)) :
1416 PAGE_SIZE
- offset_in_page(reg_read(ctxt
, VCPU_REGS_RDI
));
1417 n
= min3(in_page
, (unsigned int)sizeof(rc
->data
) / size
, count
);
1420 rc
->pos
= rc
->end
= 0;
1421 if (!ctxt
->ops
->pio_in_emulated(ctxt
, size
, port
, rc
->data
, n
))
1426 if (ctxt
->rep_prefix
&& (ctxt
->d
& String
) &&
1427 !(ctxt
->eflags
& X86_EFLAGS_DF
)) {
1428 ctxt
->dst
.data
= rc
->data
+ rc
->pos
;
1429 ctxt
->dst
.type
= OP_MEM_STR
;
1430 ctxt
->dst
.count
= (rc
->end
- rc
->pos
) / size
;
1433 memcpy(dest
, rc
->data
+ rc
->pos
, size
);
1439 static int read_interrupt_descriptor(struct x86_emulate_ctxt
*ctxt
,
1440 u16 index
, struct desc_struct
*desc
)
1445 ctxt
->ops
->get_idt(ctxt
, &dt
);
1447 if (dt
.size
< index
* 8 + 7)
1448 return emulate_gp(ctxt
, index
<< 3 | 0x2);
1450 addr
= dt
.address
+ index
* 8;
1451 return linear_read_system(ctxt
, addr
, desc
, sizeof(*desc
));
1454 static void get_descriptor_table_ptr(struct x86_emulate_ctxt
*ctxt
,
1455 u16 selector
, struct desc_ptr
*dt
)
1457 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
1460 if (selector
& 1 << 2) {
1461 struct desc_struct desc
;
1464 memset(dt
, 0, sizeof(*dt
));
1465 if (!ops
->get_segment(ctxt
, &sel
, &desc
, &base3
,
1469 dt
->size
= desc_limit_scaled(&desc
); /* what if limit > 65535? */
1470 dt
->address
= get_desc_base(&desc
) | ((u64
)base3
<< 32);
1472 ops
->get_gdt(ctxt
, dt
);
1475 static int get_descriptor_ptr(struct x86_emulate_ctxt
*ctxt
,
1476 u16 selector
, ulong
*desc_addr_p
)
1479 u16 index
= selector
>> 3;
1482 get_descriptor_table_ptr(ctxt
, selector
, &dt
);
1484 if (dt
.size
< index
* 8 + 7)
1485 return emulate_gp(ctxt
, selector
& 0xfffc);
1487 addr
= dt
.address
+ index
* 8;
1489 #ifdef CONFIG_X86_64
1490 if (addr
>> 32 != 0) {
1493 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
1494 if (!(efer
& EFER_LMA
))
1499 *desc_addr_p
= addr
;
1500 return X86EMUL_CONTINUE
;
1503 /* allowed just for 8 bytes segments */
1504 static int read_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1505 u16 selector
, struct desc_struct
*desc
,
1510 rc
= get_descriptor_ptr(ctxt
, selector
, desc_addr_p
);
1511 if (rc
!= X86EMUL_CONTINUE
)
1514 return linear_read_system(ctxt
, *desc_addr_p
, desc
, sizeof(*desc
));
1517 /* allowed just for 8 bytes segments */
1518 static int write_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1519 u16 selector
, struct desc_struct
*desc
)
1524 rc
= get_descriptor_ptr(ctxt
, selector
, &addr
);
1525 if (rc
!= X86EMUL_CONTINUE
)
1528 return linear_write_system(ctxt
, addr
, desc
, sizeof(*desc
));
1531 static int __load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1532 u16 selector
, int seg
, u8 cpl
,
1533 enum x86_transfer_type transfer
,
1534 struct desc_struct
*desc
)
1536 struct desc_struct seg_desc
, old_desc
;
1538 unsigned err_vec
= GP_VECTOR
;
1540 bool null_selector
= !(selector
& ~0x3); /* 0000-0003 are null */
1546 memset(&seg_desc
, 0, sizeof(seg_desc
));
1548 if (ctxt
->mode
== X86EMUL_MODE_REAL
) {
1549 /* set real mode segment descriptor (keep limit etc. for
1551 ctxt
->ops
->get_segment(ctxt
, &dummy
, &seg_desc
, NULL
, seg
);
1552 set_desc_base(&seg_desc
, selector
<< 4);
1554 } else if (seg
<= VCPU_SREG_GS
&& ctxt
->mode
== X86EMUL_MODE_VM86
) {
1555 /* VM86 needs a clean new segment descriptor */
1556 set_desc_base(&seg_desc
, selector
<< 4);
1557 set_desc_limit(&seg_desc
, 0xffff);
1567 /* TR should be in GDT only */
1568 if (seg
== VCPU_SREG_TR
&& (selector
& (1 << 2)))
1571 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1572 if (null_selector
) {
1573 if (seg
== VCPU_SREG_CS
|| seg
== VCPU_SREG_TR
)
1576 if (seg
== VCPU_SREG_SS
) {
1577 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
|| rpl
!= cpl
)
1581 * ctxt->ops->set_segment expects the CPL to be in
1582 * SS.DPL, so fake an expand-up 32-bit data segment.
1592 /* Skip all following checks */
1596 ret
= read_segment_descriptor(ctxt
, selector
, &seg_desc
, &desc_addr
);
1597 if (ret
!= X86EMUL_CONTINUE
)
1600 err_code
= selector
& 0xfffc;
1601 err_vec
= (transfer
== X86_TRANSFER_TASK_SWITCH
) ? TS_VECTOR
:
1604 /* can't load system descriptor into segment selector */
1605 if (seg
<= VCPU_SREG_GS
&& !seg_desc
.s
) {
1606 if (transfer
== X86_TRANSFER_CALL_JMP
)
1607 return X86EMUL_UNHANDLEABLE
;
1616 * segment is not a writable data segment or segment
1617 * selector's RPL != CPL or segment selector's RPL != CPL
1619 if (rpl
!= cpl
|| (seg_desc
.type
& 0xa) != 0x2 || dpl
!= cpl
)
1623 if (!(seg_desc
.type
& 8))
1626 if (transfer
== X86_TRANSFER_RET
) {
1627 /* RET can never return to an inner privilege level. */
1630 /* Outer-privilege level return is not implemented */
1632 return X86EMUL_UNHANDLEABLE
;
1634 if (transfer
== X86_TRANSFER_RET
|| transfer
== X86_TRANSFER_TASK_SWITCH
) {
1635 if (seg_desc
.type
& 4) {
1644 } else { /* X86_TRANSFER_CALL_JMP */
1645 if (seg_desc
.type
& 4) {
1651 if (rpl
> cpl
|| dpl
!= cpl
)
1655 /* in long-mode d/b must be clear if l is set */
1656 if (seg_desc
.d
&& seg_desc
.l
) {
1659 ctxt
->ops
->get_msr(ctxt
, MSR_EFER
, &efer
);
1660 if (efer
& EFER_LMA
)
1664 /* CS(RPL) <- CPL */
1665 selector
= (selector
& 0xfffc) | cpl
;
1668 if (seg_desc
.s
|| (seg_desc
.type
!= 1 && seg_desc
.type
!= 9))
1671 err_vec
= NP_VECTOR
;
1674 old_desc
= seg_desc
;
1675 seg_desc
.type
|= 2; /* busy */
1676 ret
= ctxt
->ops
->cmpxchg_emulated(ctxt
, desc_addr
, &old_desc
, &seg_desc
,
1677 sizeof(seg_desc
), &ctxt
->exception
);
1678 if (ret
!= X86EMUL_CONTINUE
)
1681 case VCPU_SREG_LDTR
:
1682 if (seg_desc
.s
|| seg_desc
.type
!= 2)
1685 default: /* DS, ES, FS, or GS */
1687 * segment is not a data or readable code segment or
1688 * ((segment is a data or nonconforming code segment)
1689 * and (both RPL and CPL > DPL))
1691 if ((seg_desc
.type
& 0xa) == 0x8 ||
1692 (((seg_desc
.type
& 0xc) != 0xc) &&
1693 (rpl
> dpl
&& cpl
> dpl
)))
1699 err_vec
= (seg
== VCPU_SREG_SS
) ? SS_VECTOR
: NP_VECTOR
;
1704 /* mark segment as accessed */
1705 if (!(seg_desc
.type
& 1)) {
1707 ret
= write_segment_descriptor(ctxt
, selector
,
1709 if (ret
!= X86EMUL_CONTINUE
)
1712 } else if (ctxt
->mode
== X86EMUL_MODE_PROT64
) {
1713 ret
= linear_read_system(ctxt
, desc_addr
+8, &base3
, sizeof(base3
));
1714 if (ret
!= X86EMUL_CONTINUE
)
1716 if (emul_is_noncanonical_address(get_desc_base(&seg_desc
) |
1717 ((u64
)base3
<< 32), ctxt
))
1718 return emulate_gp(ctxt
, 0);
1721 ctxt
->ops
->set_segment(ctxt
, selector
, &seg_desc
, base3
, seg
);
1724 return X86EMUL_CONTINUE
;
1726 return emulate_exception(ctxt
, err_vec
, err_code
, true);
1729 static int load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1730 u16 selector
, int seg
)
1732 u8 cpl
= ctxt
->ops
->cpl(ctxt
);
1735 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1736 * they can load it at CPL<3 (Intel's manual says only LSS can,
1739 * However, the Intel manual says that putting IST=1/DPL=3 in
1740 * an interrupt gate will result in SS=3 (the AMD manual instead
1741 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1742 * and only forbid it here.
1744 if (seg
== VCPU_SREG_SS
&& selector
== 3 &&
1745 ctxt
->mode
== X86EMUL_MODE_PROT64
)
1746 return emulate_exception(ctxt
, GP_VECTOR
, 0, true);
1748 return __load_segment_descriptor(ctxt
, selector
, seg
, cpl
,
1749 X86_TRANSFER_NONE
, NULL
);
1752 static void write_register_operand(struct operand
*op
)
1754 return assign_register(op
->addr
.reg
, op
->val
, op
->bytes
);
1757 static int writeback(struct x86_emulate_ctxt
*ctxt
, struct operand
*op
)
1761 write_register_operand(op
);
1764 if (ctxt
->lock_prefix
)
1765 return segmented_cmpxchg(ctxt
,
1771 return segmented_write(ctxt
,
1777 return segmented_write(ctxt
,
1780 op
->bytes
* op
->count
);
1783 kvm_write_sse_reg(op
->addr
.xmm
, &op
->vec_val
);
1786 kvm_write_mmx_reg(op
->addr
.mm
, &op
->mm_val
);
1794 return X86EMUL_CONTINUE
;
1797 static int push(struct x86_emulate_ctxt
*ctxt
, void *data
, int bytes
)
1799 struct segmented_address addr
;
1801 rsp_increment(ctxt
, -bytes
);
1802 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1803 addr
.seg
= VCPU_SREG_SS
;
1805 return segmented_write(ctxt
, addr
, data
, bytes
);
1808 static int em_push(struct x86_emulate_ctxt
*ctxt
)
1810 /* Disable writeback. */
1811 ctxt
->dst
.type
= OP_NONE
;
1812 return push(ctxt
, &ctxt
->src
.val
, ctxt
->op_bytes
);
1815 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1816 void *dest
, int len
)
1819 struct segmented_address addr
;
1821 addr
.ea
= reg_read(ctxt
, VCPU_REGS_RSP
) & stack_mask(ctxt
);
1822 addr
.seg
= VCPU_SREG_SS
;
1823 rc
= segmented_read(ctxt
, addr
, dest
, len
);
1824 if (rc
!= X86EMUL_CONTINUE
)
1827 rsp_increment(ctxt
, len
);
1831 static int em_pop(struct x86_emulate_ctxt
*ctxt
)
1833 return emulate_pop(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1836 static int emulate_popf(struct x86_emulate_ctxt
*ctxt
,
1837 void *dest
, int len
)
1840 unsigned long val
, change_mask
;
1841 int iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> X86_EFLAGS_IOPL_BIT
;
1842 int cpl
= ctxt
->ops
->cpl(ctxt
);
1844 rc
= emulate_pop(ctxt
, &val
, len
);
1845 if (rc
!= X86EMUL_CONTINUE
)
1848 change_mask
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
|
1849 X86_EFLAGS_ZF
| X86_EFLAGS_SF
| X86_EFLAGS_OF
|
1850 X86_EFLAGS_TF
| X86_EFLAGS_DF
| X86_EFLAGS_NT
|
1851 X86_EFLAGS_AC
| X86_EFLAGS_ID
;
1853 switch(ctxt
->mode
) {
1854 case X86EMUL_MODE_PROT64
:
1855 case X86EMUL_MODE_PROT32
:
1856 case X86EMUL_MODE_PROT16
:
1858 change_mask
|= X86_EFLAGS_IOPL
;
1860 change_mask
|= X86_EFLAGS_IF
;
1862 case X86EMUL_MODE_VM86
:
1864 return emulate_gp(ctxt
, 0);
1865 change_mask
|= X86_EFLAGS_IF
;
1867 default: /* real mode */
1868 change_mask
|= (X86_EFLAGS_IOPL
| X86_EFLAGS_IF
);
1872 *(unsigned long *)dest
=
1873 (ctxt
->eflags
& ~change_mask
) | (val
& change_mask
);
1878 static int em_popf(struct x86_emulate_ctxt
*ctxt
)
1880 ctxt
->dst
.type
= OP_REG
;
1881 ctxt
->dst
.addr
.reg
= &ctxt
->eflags
;
1882 ctxt
->dst
.bytes
= ctxt
->op_bytes
;
1883 return emulate_popf(ctxt
, &ctxt
->dst
.val
, ctxt
->op_bytes
);
1886 static int em_enter(struct x86_emulate_ctxt
*ctxt
)
1889 unsigned frame_size
= ctxt
->src
.val
;
1890 unsigned nesting_level
= ctxt
->src2
.val
& 31;
1894 return X86EMUL_UNHANDLEABLE
;
1896 rbp
= reg_read(ctxt
, VCPU_REGS_RBP
);
1897 rc
= push(ctxt
, &rbp
, stack_size(ctxt
));
1898 if (rc
!= X86EMUL_CONTINUE
)
1900 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RBP
), reg_read(ctxt
, VCPU_REGS_RSP
),
1902 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
),
1903 reg_read(ctxt
, VCPU_REGS_RSP
) - frame_size
,
1905 return X86EMUL_CONTINUE
;
1908 static int em_leave(struct x86_emulate_ctxt
*ctxt
)
1910 assign_masked(reg_rmw(ctxt
, VCPU_REGS_RSP
), reg_read(ctxt
, VCPU_REGS_RBP
),
1912 return emulate_pop(ctxt
, reg_rmw(ctxt
, VCPU_REGS_RBP
), ctxt
->op_bytes
);
1915 static int em_push_sreg(struct x86_emulate_ctxt
*ctxt
)
1917 int seg
= ctxt
->src2
.val
;
1919 ctxt
->src
.val
= get_segment_selector(ctxt
, seg
);
1920 if (ctxt
->op_bytes
== 4) {
1921 rsp_increment(ctxt
, -2);
1925 return em_push(ctxt
);
1928 static int em_pop_sreg(struct x86_emulate_ctxt
*ctxt
)
1930 int seg
= ctxt
->src2
.val
;
1931 unsigned long selector
;
1934 rc
= emulate_pop(ctxt
, &selector
, 2);
1935 if (rc
!= X86EMUL_CONTINUE
)
1938 if (ctxt
->modrm_reg
== VCPU_SREG_SS
)
1939 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_MOV_SS
;
1940 if (ctxt
->op_bytes
> 2)
1941 rsp_increment(ctxt
, ctxt
->op_bytes
- 2);
1943 rc
= load_segment_descriptor(ctxt
, (u16
)selector
, seg
);
1947 static int em_pusha(struct x86_emulate_ctxt
*ctxt
)
1949 unsigned long old_esp
= reg_read(ctxt
, VCPU_REGS_RSP
);
1950 int rc
= X86EMUL_CONTINUE
;
1951 int reg
= VCPU_REGS_RAX
;
1953 while (reg
<= VCPU_REGS_RDI
) {
1954 (reg
== VCPU_REGS_RSP
) ?
1955 (ctxt
->src
.val
= old_esp
) : (ctxt
->src
.val
= reg_read(ctxt
, reg
));
1958 if (rc
!= X86EMUL_CONTINUE
)
1967 static int em_pushf(struct x86_emulate_ctxt
*ctxt
)
1969 ctxt
->src
.val
= (unsigned long)ctxt
->eflags
& ~X86_EFLAGS_VM
;
1970 return em_push(ctxt
);
1973 static int em_popa(struct x86_emulate_ctxt
*ctxt
)
1975 int rc
= X86EMUL_CONTINUE
;
1976 int reg
= VCPU_REGS_RDI
;
1979 while (reg
>= VCPU_REGS_RAX
) {
1980 if (reg
== VCPU_REGS_RSP
) {
1981 rsp_increment(ctxt
, ctxt
->op_bytes
);
1985 rc
= emulate_pop(ctxt
, &val
, ctxt
->op_bytes
);
1986 if (rc
!= X86EMUL_CONTINUE
)
1988 assign_register(reg_rmw(ctxt
, reg
), val
, ctxt
->op_bytes
);
1994 static int __emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
1996 const struct x86_emulate_ops
*ops
= ctxt
->ops
;
2003 /* TODO: Add limit checks */
2004 ctxt
->src
.val
= ctxt
->eflags
;
2006 if (rc
!= X86EMUL_CONTINUE
)
2009 ctxt
->eflags
&= ~(X86_EFLAGS_IF
| X86_EFLAGS_TF
| X86_EFLAGS_AC
);
2011 ctxt
->src
.val
= get_segment_selector(ctxt
, VCPU_SREG_CS
);
2013 if (rc
!= X86EMUL_CONTINUE
)
2016 ctxt
->src
.val
= ctxt
->_eip
;
2018 if (rc
!= X86EMUL_CONTINUE
)
2021 ops
->get_idt(ctxt
, &dt
);
2023 eip_addr
= dt
.address
+ (irq
<< 2);
2024 cs_addr
= dt
.address
+ (irq
<< 2) + 2;
2026 rc
= linear_read_system(ctxt
, cs_addr
, &cs
, 2);
2027 if (rc
!= X86EMUL_CONTINUE
)
2030 rc
= linear_read_system(ctxt
, eip_addr
, &eip
, 2);
2031 if (rc
!= X86EMUL_CONTINUE
)
2034 rc
= load_segment_descriptor(ctxt
, cs
, VCPU_SREG_CS
);
2035 if (rc
!= X86EMUL_CONTINUE
)
2043 int emulate_int_real(struct x86_emulate_ctxt
*ctxt
, int irq
)
2047 invalidate_registers(ctxt
);
2048 rc
= __emulate_int_real(ctxt
, irq
);
2049 if (rc
== X86EMUL_CONTINUE
)
2050 writeback_registers(ctxt
);
2054 static int emulate_int(struct x86_emulate_ctxt
*ctxt
, int irq
)
2056 switch(ctxt
->mode
) {
2057 case X86EMUL_MODE_REAL
:
2058 return __emulate_int_real(ctxt
, irq
);
2059 case X86EMUL_MODE_VM86
:
2060 case X86EMUL_MODE_PROT16
:
2061 case X86EMUL_MODE_PROT32
:
2062 case X86EMUL_MODE_PROT64
:
2064 /* Protected mode interrupts unimplemented yet */
2065 return X86EMUL_UNHANDLEABLE
;
2069 static int emulate_iret_real(struct x86_emulate_ctxt
*ctxt
)
2071 int rc
= X86EMUL_CONTINUE
;
2072 unsigned long temp_eip
= 0;
2073 unsigned long temp_eflags
= 0;
2074 unsigned long cs
= 0;
2075 unsigned long mask
= X86_EFLAGS_CF
| X86_EFLAGS_PF
| X86_EFLAGS_AF
|
2076 X86_EFLAGS_ZF
| X86_EFLAGS_SF
| X86_EFLAGS_TF
|
2077 X86_EFLAGS_IF
| X86_EFLAGS_DF
| X86_EFLAGS_OF
|
2078 X86_EFLAGS_IOPL
| X86_EFLAGS_NT
| X86_EFLAGS_RF
|
2079 X86_EFLAGS_AC
| X86_EFLAGS_ID
|
2081 unsigned long vm86_mask
= X86_EFLAGS_VM
| X86_EFLAGS_VIF
|
2084 /* TODO: Add stack limit check */
2086 rc
= emulate_pop(ctxt
, &temp_eip
, ctxt
->op_bytes
);
2088 if (rc
!= X86EMUL_CONTINUE
)
2091 if (temp_eip
& ~0xffff)
2092 return emulate_gp(ctxt
, 0);
2094 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
2096 if (rc
!= X86EMUL_CONTINUE
)
2099 rc
= emulate_pop(ctxt
, &temp_eflags
, ctxt
->op_bytes
);
2101 if (rc
!= X86EMUL_CONTINUE
)
2104 rc
= load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
);
2106 if (rc
!= X86EMUL_CONTINUE
)
2109 ctxt
->_eip
= temp_eip
;
2111 if (ctxt
->op_bytes
== 4)
2112 ctxt
->eflags
= ((temp_eflags
& mask
) | (ctxt
->eflags
& vm86_mask
));
2113 else if (ctxt
->op_bytes
== 2) {
2114 ctxt
->eflags
&= ~0xffff;
2115 ctxt
->eflags
|= temp_eflags
;
2118 ctxt
->eflags
&= ~EFLG_RESERVED_ZEROS_MASK
; /* Clear reserved zeros */
2119 ctxt
->eflags
|= X86_EFLAGS_FIXED
;
2120 ctxt
->ops
->set_nmi_mask(ctxt
, false);
2125 static int em_iret(struct x86_emulate_ctxt
*ctxt
)
2127 switch(ctxt
->mode
) {
2128 case X86EMUL_MODE_REAL
:
2129 return emulate_iret_real(ctxt
);
2130 case X86EMUL_MODE_VM86
:
2131 case X86EMUL_MODE_PROT16
:
2132 case X86EMUL_MODE_PROT32
:
2133 case X86EMUL_MODE_PROT64
:
2135 /* iret from protected mode unimplemented yet */
2136 return X86EMUL_UNHANDLEABLE
;
2140 static int em_jmp_far(struct x86_emulate_ctxt
*ctxt
)
2144 struct desc_struct new_desc
;
2145 u8 cpl
= ctxt
->ops
->cpl(ctxt
);
2147 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
2149 rc
= __load_segment_descriptor(ctxt
, sel
, VCPU_SREG_CS
, cpl
,
2150 X86_TRANSFER_CALL_JMP
,
2152 if (rc
!= X86EMUL_CONTINUE
)
2155 rc
= assign_eip_far(ctxt
, ctxt
->src
.val
, &new_desc
);
2156 /* Error handling is not implemented. */
2157 if (rc
!= X86EMUL_CONTINUE
)
2158 return X86EMUL_UNHANDLEABLE
;
2163 static int em_jmp_abs(struct x86_emulate_ctxt
*ctxt
)
2165 return assign_eip_near(ctxt
, ctxt
->src
.val
);
2168 static int em_call_near_abs(struct x86_emulate_ctxt
*ctxt
)
2173 old_eip
= ctxt
->_eip
;
2174 rc
= assign_eip_near(ctxt
, ctxt
->src
.val
);
2175 if (rc
!= X86EMUL_CONTINUE
)
2177 ctxt
->src
.val
= old_eip
;
2182 static int em_cmpxchg8b(struct x86_emulate_ctxt
*ctxt
)
2184 u64 old
= ctxt
->dst
.orig_val64
;
2186 if (ctxt
->dst
.bytes
== 16)
2187 return X86EMUL_UNHANDLEABLE
;
2189 if (((u32
) (old
>> 0) != (u32
) reg_read(ctxt
, VCPU_REGS_RAX
)) ||
2190 ((u32
) (old
>> 32) != (u32
) reg_read(ctxt
, VCPU_REGS_RDX
))) {
2191 *reg_write(ctxt
, VCPU_REGS_RAX
) = (u32
) (old
>> 0);
2192 *reg_write(ctxt
, VCPU_REGS_RDX
) = (u32
) (old
>> 32);
2193 ctxt
->eflags
&= ~X86_EFLAGS_ZF
;
2195 ctxt
->dst
.val64
= ((u64
)reg_read(ctxt
, VCPU_REGS_RCX
) << 32) |
2196 (u32
) reg_read(ctxt
, VCPU_REGS_RBX
);
2198 ctxt
->eflags
|= X86_EFLAGS_ZF
;
2200 return X86EMUL_CONTINUE
;
2203 static int em_ret(struct x86_emulate_ctxt
*ctxt
)
2208 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
2209 if (rc
!= X86EMUL_CONTINUE
)
2212 return assign_eip_near(ctxt
, eip
);
2215 static int em_ret_far(struct x86_emulate_ctxt
*ctxt
)
2218 unsigned long eip
, cs
;
2219 int cpl
= ctxt
->ops
->cpl(ctxt
);
2220 struct desc_struct new_desc
;
2222 rc
= emulate_pop(ctxt
, &eip
, ctxt
->op_bytes
);
2223 if (rc
!= X86EMUL_CONTINUE
)
2225 rc
= emulate_pop(ctxt
, &cs
, ctxt
->op_bytes
);
2226 if (rc
!= X86EMUL_CONTINUE
)
2228 rc
= __load_segment_descriptor(ctxt
, (u16
)cs
, VCPU_SREG_CS
, cpl
,
2231 if (rc
!= X86EMUL_CONTINUE
)
2233 rc
= assign_eip_far(ctxt
, eip
, &new_desc
);
2234 /* Error handling is not implemented. */
2235 if (rc
!= X86EMUL_CONTINUE
)
2236 return X86EMUL_UNHANDLEABLE
;
2241 static int em_ret_far_imm(struct x86_emulate_ctxt
*ctxt
)
2245 rc
= em_ret_far(ctxt
);
2246 if (rc
!= X86EMUL_CONTINUE
)
2248 rsp_increment(ctxt
, ctxt
->src
.val
);
2249 return X86EMUL_CONTINUE
;
2252 static int em_cmpxchg(struct x86_emulate_ctxt
*ctxt
)
2254 /* Save real source value, then compare EAX against destination. */
2255 ctxt
->dst
.orig_val
= ctxt
->dst
.val
;
2256 ctxt
->dst
.val
= reg_read(ctxt
, VCPU_REGS_RAX
);
2257 ctxt
->src
.orig_val
= ctxt
->src
.val
;
2258 ctxt
->src
.val
= ctxt
->dst
.orig_val
;
2259 fastop(ctxt
, em_cmp
);
2261 if (ctxt
->eflags
& X86_EFLAGS_ZF
) {
2262 /* Success: write back to memory; no update of EAX */
2263 ctxt
->src
.type
= OP_NONE
;
2264 ctxt
->dst
.val
= ctxt
->src
.orig_val
;
2266 /* Failure: write the value we saw to EAX. */
2267 ctxt
->src
.type
= OP_REG
;
2268 ctxt
->src
.addr
.reg
= reg_rmw(ctxt
, VCPU_REGS_RAX
);
2269 ctxt
->src
.val
= ctxt
->dst
.orig_val
;
2270 /* Create write-cycle to dest by writing the same value */
2271 ctxt
->dst
.val
= ctxt
->dst
.orig_val
;
2273 return X86EMUL_CONTINUE
;
2276 static int em_lseg(struct x86_emulate_ctxt
*ctxt
)
2278 int seg
= ctxt
->src2
.val
;
2282 memcpy(&sel
, ctxt
->src
.valptr
+ ctxt
->op_bytes
, 2);
2284 rc
= load_segment_descriptor(ctxt
, sel
, seg
);
2285 if (rc
!= X86EMUL_CONTINUE
)
2288 ctxt
->dst
.val
= ctxt
->src
.val
;
2292 static int emulator_has_longmode(struct x86_emulate_ctxt
*ctxt
)
2294 #ifdef CONFIG_X86_64
2295 return ctxt
->ops
->guest_has_long_mode(ctxt
);
2301 static void rsm_set_desc_flags(struct desc_struct
*desc
, u32 flags
)
2303 desc
->g
= (flags
>> 23) & 1;
2304 desc
->d
= (flags
>> 22) & 1;
2305 desc
->l
= (flags
>> 21) & 1;
2306 desc
->avl
= (flags
>> 20) & 1;
2307 desc
->p
= (flags
>> 15) & 1;
2308 desc
->dpl
= (flags
>> 13) & 3;
2309 desc
->s
= (flags
>> 12) & 1;
2310 desc
->type
= (flags
>> 8) & 15;
2313 static int rsm_load_seg_32(struct x86_emulate_ctxt
*ctxt
, const char *smstate
,
2316 struct desc_struct desc
;
2320 selector
= GET_SMSTATE(u32
, smstate
, 0x7fa8 + n
* 4);
2323 offset
= 0x7f84 + n
* 12;
2325 offset
= 0x7f2c + (n
- 3) * 12;
2327 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 8));
2328 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 4));
2329 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smstate
, offset
));
2330 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, 0, n
);
2331 return X86EMUL_CONTINUE
;
2334 #ifdef CONFIG_X86_64
2335 static int rsm_load_seg_64(struct x86_emulate_ctxt
*ctxt
, const char *smstate
,
2338 struct desc_struct desc
;
2343 offset
= 0x7e00 + n
* 16;
2345 selector
= GET_SMSTATE(u16
, smstate
, offset
);
2346 rsm_set_desc_flags(&desc
, GET_SMSTATE(u16
, smstate
, offset
+ 2) << 8);
2347 set_desc_limit(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 4));
2348 set_desc_base(&desc
, GET_SMSTATE(u32
, smstate
, offset
+ 8));
2349 base3
= GET_SMSTATE(u32
, smstate
, offset
+ 12);
2351 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, base3
, n
);
2352 return X86EMUL_CONTINUE
;
2356 static int rsm_enter_protected_mode(struct x86_emulate_ctxt
*ctxt
,
2357 u64 cr0
, u64 cr3
, u64 cr4
)
2362 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2364 if (cr4
& X86_CR4_PCIDE
) {
2369 bad
= ctxt
->ops
->set_cr(ctxt
, 3, cr3
);
2371 return X86EMUL_UNHANDLEABLE
;
2374 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2375 * Then enable protected mode. However, PCID cannot be enabled
2376 * if EFER.LMA=0, so set it separately.
2378 bad
= ctxt
->ops
->set_cr(ctxt
, 4, cr4
& ~X86_CR4_PCIDE
);
2380 return X86EMUL_UNHANDLEABLE
;
2382 bad
= ctxt
->ops
->set_cr(ctxt
, 0, cr0
);
2384 return X86EMUL_UNHANDLEABLE
;
2386 if (cr4
& X86_CR4_PCIDE
) {
2387 bad
= ctxt
->ops
->set_cr(ctxt
, 4, cr4
);
2389 return X86EMUL_UNHANDLEABLE
;
2391 bad
= ctxt
->ops
->set_cr(ctxt
, 3, cr3
| pcid
);
2393 return X86EMUL_UNHANDLEABLE
;
2398 return X86EMUL_CONTINUE
;
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr3, cr4;
	int i;

	cr0 =          GET_SMSTATE(u32, smstate, 0x7ffc);
	cr3 =          GET_SMSTATE(u32, smstate, 0x7ff8);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =   GET_SMSTATE(u32, smstate, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smstate, 0x7fcc);

	if (ctxt->ops->set_dr(ctxt, 6, val))
		return X86EMUL_UNHANDLEABLE;

	val = GET_SMSTATE(u32, smstate, 0x7fc8);

	if (ctxt->ops->set_dr(ctxt, 7, val))
		return X86EMUL_UNHANDLEABLE;

	selector =                GET_SMSTATE(u32, smstate, 0x7fc4);
	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, 0x7f64));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, 0x7f60));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =                GET_SMSTATE(u32, smstate, 0x7fc0);
	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, 0x7f80));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, 0x7f7c));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
	dt.size =    GET_SMSTATE(u32, smstate, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
	dt.size =    GET_SMSTATE(u32, smstate, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}
#ifdef CONFIG_X86_64
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr3, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);

	ctxt->_eip =   GET_SMSTATE(u64, smstate, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u64, smstate, 0x7f68);

	if (ctxt->ops->set_dr(ctxt, 6, val))
		return X86EMUL_UNHANDLEABLE;

	val = GET_SMSTATE(u64, smstate, 0x7f60);

	if (ctxt->ops->set_dr(ctxt, 7, val))
		return X86EMUL_UNHANDLEABLE;

	cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
	cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
	cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
	val =                       GET_SMSTATE(u64, smstate, 0x7ed0);

	if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
		return X86EMUL_UNHANDLEABLE;

	selector =                GET_SMSTATE(u32, smstate, 0x7e90);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, 0x7e94));
	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, 0x7e98));
	base3 =                   GET_SMSTATE(u32, smstate, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size =    GET_SMSTATE(u32, smstate, 0x7e84);
	dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector =                GET_SMSTATE(u32, smstate, 0x7e70);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, 0x7e74));
	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, 0x7e78));
	base3 =                   GET_SMSTATE(u32, smstate, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size =    GET_SMSTATE(u32, smstate, 0x7e64);
	dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}
#endif
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	char buf[512];
	u64 smbase;
	int ret;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	smbase = ctxt->ops->get_smbase(ctxt);

	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
	if (ret != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->ops->exiting_smm(ctxt);

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PCIDE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);

		/* A 32-bit code segment is required to clear EFER.LMA. */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA. */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	if (emulator_has_longmode(ctxt)) {
		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PAE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

		/* And finally go back to 32-bit mode. */
		efer = 0;
		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
	}

	/*
	 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
	 * state (e.g. enter guest mode) before loading state from the SMM
	 * state-save area.
	 */
	if (ctxt->ops->leave_smm(ctxt, buf))
		goto emulate_shutdown;

#ifdef CONFIG_X86_64
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, buf);
	else
#endif
		ret = rsm_load_state_32(ctxt, buf);

	if (ret != X86EMUL_CONTINUE)
		goto emulate_shutdown;

	/*
	 * Note, the ctxt->ops callbacks are responsible for handling side
	 * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
	 * runtime updates, etc...  If that changes, e.g. this flow is moved
	 * out of the emulator to make it look more like enter_smm(), then
	 * those side effects need to be explicitly handled for both success
	 * and shutdown.
	 */
	return X86EMUL_CONTINUE;

emulate_shutdown:
	ctxt->ops->triple_fault(ctxt);
	return X86EMUL_CONTINUE;
}
static void
setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	return is_guest_vendor_intel(ebx, ecx, edx);
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	/*
	 * remark: Intel CPUs only support "syscall" in 64bit longmode. Also a
	 * 64bit guest with a 32bit compat-app running will #UD !! While this
	 * behaviour can be fixed (by emulating) into AMD response - CPUs of
	 * AMD can't behave like Intel.
	 */
	if (is_guest_vendor_intel(ebx, ecx, edx))
		return false;

	if (is_guest_vendor_amd(ebx, ecx, edx) ||
	    is_guest_vendor_hygon(ebx, ecx, edx))
		return true;

	/*
	 * default: (not Intel, not AMD, not Hygon), apply Intel's
	 * stricter rules...
	 */
	return false;
}
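/*
 * SYSCALL takes its new CS from MSR_STAR[47:32]; SS is architecturally the
 * next GDT entry, which is why em_syscall() reads a single MSR, shifts it
 * down and derives ss_sel as cs_sel + 8.
 */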
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	setup_syscalls_segments(&cs, &ss);
	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(&cs, &ss);
	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;
	if (efer & EFER_LMA)
		ctxt->mode = X86EMUL_MODE_PROT64;

	return X86EMUL_CONTINUE;
}
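/*
 * SYSEXIT likewise derives its selectors from MSR_IA32_SYSENTER_CS: the
 * 32-bit return path uses CS = SYSENTER_CS + 16 and SS = SYSENTER_CS + 24,
 * the 64-bit path (REX.W) uses CS = SYSENTER_CS + 32 with SS = CS + 8, and
 * both selectors are forced to RPL 3 before being loaded.
 */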
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(&cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (emul_is_noncanonical_address(rcx, ctxt) ||
		    emul_is_noncanonical_address(rdx, ctxt))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	ctxt->mode = usermode;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

#define VMWARE_PORT_VMPORT	(0x5658)
#define VMWARE_PORT_VMRPC	(0x5659)
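/*
 * Checks the TSS I/O permission bitmap: the 16-bit bitmap base lives at
 * offset 102 of the TSS (hence the minimum limit of 103 enforced below),
 * and each I/O port is one bit, so two bytes are read and the bits for all
 * <len> ports starting at <port> must be clear for access to be allowed.
 */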
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	/*
	 * VMware allows access to these ports even if denied
	 * by TSS I/O permission bitmap. Mimic behavior.
	 */
	if (enable_vmware_backdoor &&
	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
		return true;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		fallthrough;
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
				  ldt_sel_offset - eip_offset);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr, dr7;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

	return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
			    struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}
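/*
 * DAS adjusts AL after a subtraction of packed-BCD operands: subtract 6 if
 * the low nibble overflowed (or AF was set), subtract 0x60 if the high
 * nibble overflowed (or CF was set), as in the SDM's DAS operation listing.
 */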
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
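/*
 * AAM divides AL by the immediate, storing the quotient in AH and the
 * remainder in AL; AAD folds AH back into AL as AL + AH * imm8.  Both
 * default to base 10, and AAM must raise #DE on a zero divisor.
 */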
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	enum x86emul_mode prev_mode = ctxt->mode;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but the very least we should
	   restore cs */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}
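/*
 * em_cwd() above computes CWD/CDQ/CQO branchlessly: shifting the sign bit
 * down leaves 0 or 1, subtracting 1 maps those to all-ones or 0, and the
 * final complement stores the replicated sign bit (0 or all-ones) into rDX.
 */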
static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc_aux = 0;

	if (!ctxt->ops->guest_has_rdpid(ctxt))
		return emulate_ud(ctxt);

	ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
	ctxt->dst.val = tsc_aux;
	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}
static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u16 tmp;

	if (!ctxt->ops->guest_has_movbe(ctxt))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
	u64 msr_data;
	int r;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);

	if (r == X86EMUL_IO_NEEDED)
		return r;

	if (r > 0)
		return emulate_gp(ctxt, 0);

	return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
	u64 msr_data;
	int r;

	r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);

	if (r == X86EMUL_IO_NEEDED)
		return r;

	if (r > 0)
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}
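/*
 * CR4.UMIP turns the table-exposing instructions (SGDT/SIDT/SLDT/SMSW/STR)
 * into #GP(0) at CPL > 0; plain "mov r/m, sreg" for ES..GS is unaffected,
 * hence the segment > VCPU_SREG_GS check below.
 */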
static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
{
	if (segment > VCPU_SREG_GS &&
	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	ctxt->dst.val = get_segment_selector(ctxt, segment);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	return em_store_sreg(ctxt, ctxt->modrm_reg);
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_sldt(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_str(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_TR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
				   &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
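/*
 * Opcodes 0xe0/0xe1/0xe2 are LOOPNE/LOOPE/LOOP.  XORing the opcode with
 * 0x5 maps LOOPNE to condition code 5 (NE) and LOOPE to 4 (E), letting
 * test_cc() evaluate the ZF condition; plain LOOP only tests rCX.
 */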
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;
	u64 msr = 0;

	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
	    ctxt->ops->cpl(ctxt)) {
		return emulate_gp(ctxt, 0);
	}

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflushopt regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}
static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->ops->guest_has_fxsr(ctxt))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	return X86EMUL_CONTINUE;
}

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}
/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       FIP and FDP.
 *  3) 64-bit mode with REX.W prefix
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode without REX.W prefix
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	kvm_fpu_get();

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	kvm_fpu_put();

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
}
/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * in the host registers (via FXSAVE) instead, so they won't be modified.
 * (preemption has to stay disabled until FXRSTOR).
 *
 * Use noinline to keep the stack for other functions called by callers small.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
	size_t size;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	size = fxstate_size(ctxt);
	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	kvm_fpu_get();

	if (size < __fxstate_size(16)) {
		rc = fxregs_fixup(&fx_state, size);
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}

	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
	kvm_fpu_put();

	return rc;
}
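/*
 * XSETBV loads XCR[ECX] from EDX:EAX; any failure reported by the
 * ->set_xcr() hook (bad XCR index or invalid feature mask) becomes #GP(0).
 */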
static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	edx = reg_read(ctxt, VCPU_REGS_RDX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);

	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_access(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
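/*
 * MOV DR #UDs on register indices above 7 and, with CR4.DE set, on the
 * DR4/DR5 aliases; DR7.GD additionally turns any debug-register access
 * into #DB with DR6.BD set, which check_dr_read() implements below.
 */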
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	ulong dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~DR_TRAP_BITS;
		dr6 |= DR6_BD | DR6_ACTIVE_LOW;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	/*
	 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
	 * in Ring3 when CR4.PCE=0.
	 */
	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
		return X86EMUL_CONTINUE;

	/*
	 * If CR4.PCE is set, the SDM requires CPL=0 or CR0.PE=0.  The CR0.PE
	 * check however is unnecessary because CPL is always 0 outside
	 * protected mode.
	 */
	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
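/*
 * Shorthand for the decode tables below: D() is decode-only, I()/F()
 * attach an execute or fastop callback, DI()/II() additionally name an
 * intercept, DIP()/IIP() add a ->check_perm() hook on top, and the *2bv
 * variants emit a byte-op/full-size pair of entries.
 */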
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD, em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm2[] = {
	N,
	II(ImplicitOps | Priv, em_xsetbv, xsetbv),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
	DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock, em_inc),
	F(DstMem | SrcNone | Lock, em_dec),
	I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
	I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
	I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
};

static const struct opcode group6[] = {
	II(Prot | DstMem, em_sldt, sldt),
	II(Prot | DstMem, em_str, str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};
static const struct group_dual group7 = { {
	II(Mov | DstMem, em_sgdt, sgdt),
	II(Mov | DstMem, em_sidt, sidt),
	II(SrcMem | Priv, em_lgdt, lgdt),
	II(SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	EXT(0, group7_rm2),
	EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite, em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
	F(DstMem | SrcImmByte | Lock, em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};

/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
static const struct gprefix pfx_0f_c7_7 = {
	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N,
	GP(0, &pfx_0f_c7_7),
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
};

static const struct group_dual group15 = { {
	I(ModRM | Aligned16, em_fxsave),
	I(ModRM | Aligned16, em_fxrstor),
	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_10_0f_11 = {
	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};
static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };
static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};
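/*
 * The one-byte opcode table below is indexed directly by opcode byte;
 * X8()/X16() replicate an entry across a run of opcodes, and the
 * G()/GD()/E() entries chain into the group and escape tables above.
 */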
static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch | IsBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg), /* lea */
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
	I(ImplicitOps | NearBranch | IsBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
	I(Stack | IsBranch, em_leave),
	I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
	I(ImplicitOps | IsBranch, em_ret_far),
	D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
	D(ImplicitOps | No64 | IsBranch),
	II(ImplicitOps | IsBranch, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
	I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch | IsBranch, em_call),
	D(SrcImm | ImplicitOps | NearBranch | IsBranch),
	I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

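/*
 * Two-byte opcode table, indexed by the opcode byte that follows the
 * 0x0f escape.
 */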
static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
	N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
	    check_cr_access),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
	    check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch | IsBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

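/*
 * instr_dual entries split on ModRM.mod: mod012 is the memory form,
 * mod3 the register form.  movbe is defined only with a memory operand,
 * so the register slots stay N.
 */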
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Insns below are selected by the mandatory prefix, which is indexed by
 * the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

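/*
 * Fetch an immediate operand of 'size' bytes from the instruction
 * stream at _eip, sign-extending it into op->val; when sign_extension
 * is false the value is masked back down to the operand size.
 */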
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

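/*
 * Decode one operand.  'd' is a 5-bit Op* selector extracted from
 * ctxt->d; x86_decode_insn calls this three times, once each for the
 * Src, Src2 and Dst fields.
 */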
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

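/*
 * Top-level decode: consume legacy and REX prefixes, look up the opcode
 * byte(s) in the tables above, resolve any group/dual/prefix/escape
 * indirection, then decode ModRM/SIB and the three operands.
 */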
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	u16 dummy;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	ctxt->intercept = x86_intercept_none;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_ES;
			break;
		case 0x2e:	/* CS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_CS;
			break;
		case 0x36:	/* SS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_SS;
			break;
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_DS;
			break;
		case 0x64:	/* FS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_FS;
			break;
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_GS;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf) {
				size_t size = ARRAY_SIZE(opcode.u.esc->high);
				u32 index = array_index_nospec(
					ctxt->modrm - 0xc0, size);

				opcode = opcode.u.esc->high[index];
			} else {
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			}
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	ctxt->is_branch = opcode.flags & IsBranch;

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
	    likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
		      No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition only applies to REPE and
	 * REPNE.  Test whether the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
	 * termination condition:
	 * - if REPE/REPZ and ZF = 0 then done
	 * - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	kvm_fpu_get();
	rc = asm_safe("fwait");
	kvm_fpu_put();

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct operand *op)
{
	if (op->type == OP_MM)
		kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
}

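/*
 * Dispatch to a fastop routine.  The 2-, 4- and 8-byte variants of each
 * routine are laid out FASTOP_SIZE bytes apart, so the right entry is
 * found by offsetting the base pointer by __ffs(dst.bytes); byte ops
 * use the base entry itself.  Operands travel in registers (dst in rax,
 * src in rdx, src2 in rcx) with the guest's arithmetic flags installed
 * around the call.
 */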
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	/* Clear fields that are set conditionally but read without a guard. */
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

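/*
 * Execute a previously decoded instruction: run the mode, privilege and
 * intercept checks, read the memory operands, dispatch to the execute
 * or fastop handler, write back the results, and handle REP string
 * iteration and the RIP update.
 */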
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(&ctxt->src);
			fetch_possible_mmx_operand(&ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(&ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop)
			rc = fastop(ctxt, ctxt->fop);
		else
			rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;  /* nop */
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		ctxt->eip = (u32)ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}