/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
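
/*
 * Illustrative sketch (not quoted from this file): each operand slot is a
 * 5-bit OpXxx value packed into the u64 opcode flags, so the decoder can
 * recover a slot's operand type with a shift and mask, e.g. something like
 *
 *	unsigned int dst_type = (ctxt->d >> DstShift) & OpMask;
 *
 * The exact extraction lives in the operand-decode path further down the
 * file; the expression above only shows how OpBits/OpMask are intended to
 * be used.
 */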
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operand */
#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
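
/*
 * Illustrative sketch (assumption, not quoted from this excerpt): since every
 * fastop thunk is FASTOP_SIZE bytes, the dispatcher can select the entry for
 * the current operand size by pointer arithmetic rather than a table, roughly
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * i.e. byte ops use the first entry and word/long/quad ops the following ones.
 */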
struct opcode {
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \

#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	ON64(FOP1E(op##q, rax)) \

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	ON64(FOP1E(op, rcx)) \

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	ON64(FOP1EEX(op, rcx)) \

#define FOP2E(op,  dst, src)	   \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \

#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".type " #op ", @function \n\t" \

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
	asm volatile("1:" insn "\n" \
		     ".pushsection .fixup, \"ax\"\n" \
		     "3: movl $1, %[_fault]\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : [_fault] "+qm"(_fault) inoutclob ); \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
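
/*
 * Usage sketch (illustrative; assumes the statement-expression wrapper elided
 * above, which declares the local _fault flag): callers wrap one potentially
 * faulting instruction, e.g.
 *
 *	rc = asm_safe("fwait");
 *
 * and get X86EMUL_UNHANDLEABLE back instead of a host fault if the
 * instruction trapped.
 */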
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}
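
/*
 * Example (illustrative): with ctxt->ad_bytes == 2, ad_mask() evaluates to
 * 0xffff, so register_address() wraps effective addresses at 64 KiB exactly
 * as 16-bit addressing requires; with ad_bytes == 4 the wrap point is 4 GiB.
 */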
static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
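
/*
 * Example (illustrative): with the granularity bit set, a raw limit of
 * 0x0ffff scales to (0x0ffff << 12) | 0xfff = 0xfffffff, i.e. the limit is
 * interpreted in 4 KiB pages instead of bytes.
 */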
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	case X86EMUL_MODE_PROT64:
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			lim = desc.d ? 0xffffffff : 0xffff;
		if (lim == 0xffffffff)
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
	return __linearize(ctxt, addr, &max_size, size, write, false,
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	struct segmented_address addr = { .seg = VCPU_SREG_CS,

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			mode = X86EMUL_MODE_PROT64;
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
	if (unlikely(rc != X86EMUL_CONTINUE))

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))

	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
	ctxt->_eip += sizeof(_type);					\
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
	ctxt->fetch.ptr += sizeof(_type);				\

#define insn_fetch_arr(_arr, _size, _ctxt)				\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
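
/*
 * Usage sketch (illustrative): the decoder pulls opcode bytes, immediates and
 * displacements through these macros, e.g. something along the lines of
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 *	modrm_ea   += insn_fetch(s32, ctxt);
 *
 * where insn_fetch() advances _eip and copies from the prefetched cache,
 * bailing out early if do_insn_fetch_bytes() could not refill it.
 */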
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
}
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
}
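
/*
 * Illustrative note (assumption about the surrounding decode path, not quoted
 * from this excerpt): the condition nibble is taken from the opcode itself,
 * so a Jcc handler can evaluate its predicate with something like
 *
 *	if (test_cc(ctxt->b, ctxt->eflags))
 *		rc = jmp_rel(ctxt, ctxt->src.val);
 *
 * dispatching into the 4-bytes-per-cc em_setcc table built by FOP_SETCC.
 */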
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fninit");
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstcw %0": "+m"(fcw));

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstsw %0": "+m"(fsw));

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		read_sse_reg(ctxt, &op->vec_val, reg);
	if (ctxt->d & Mmx) {

	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
		if (ctxt->d & Sse) {
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
		if (ctxt->d & Mmx) {
			op->addr.mm = ctxt->modrm_rm & 7;
		fetch_register_operand(op);

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			modrm_ea += insn_fetch(s8, ctxt);
			modrm_ea += insn_fetch(u16, ctxt);
		switch (ctxt->modrm_rm) {
			modrm_ea += bx + si;
			modrm_ea += bx + di;
			modrm_ea += bp + si;
			modrm_ea += bp + di;
			if (ctxt->modrm_mod != 0)
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
			modrm_ea += insn_fetch(s8, ctxt);
			modrm_ea += insn_fetch(s32, ctxt);
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->ad_bytes) {
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		op->addr.mem.ea = insn_fetch(u64, ctxt);
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
	if (rc != X86EMUL_CONTINUE)

	memcpy(dest, mc->data + mc->pos, size);

	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
	}
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else {
		ops->get_gdt(ctxt, dt);
	}
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	u16 index = selector >> 3;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)

	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	unsigned err_vec = GP_VECTOR;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */

	memset(&seg_desc, 0, sizeof(seg_desc));

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
	}

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
		}

		/* Skip all following checks */
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
	}

		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;

		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment selector's RPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)

		if (!(seg_desc.type & 8))

		if (seg_desc.type & 4) {
			if (rpl > cpl || dpl != cpl)
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;

		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))

		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			ret = write_segment_descriptor(ctxt, selector,
			if (ret != X86EMUL_CONTINUE)
		} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
			ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
			if (ret != X86EMUL_CONTINUE)
			if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
							 ((u64)base3 << 32), ctxt))
				return emulate_gp(ctxt, 0);
		}

	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	return X86EMUL_CONTINUE;

	return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
		write_register_operand(op);
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
		else
			return segmented_write(ctxt,
		return segmented_write(ctxt,
				       op->bytes * op->count);
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)

	rsp_increment(ctxt, len);
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
			change_mask |= X86_EFLAGS_IOPL;
			change_mask |= X86_EFLAGS_IF;
	case X86EMUL_MODE_VM86:
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;

		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		if (rc != X86EMUL_CONTINUE)
	}
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
	}
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	if (rc != X86EMUL_CONTINUE)

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)

	ctxt->src.val = ctxt->_eip;
	if (rc != X86EMUL_CONTINUE)

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
	if (rc != X86EMUL_CONTINUE)

	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
	if (rc != X86EMUL_CONTINUE)

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
	if (rc != X86EMUL_CONTINUE)

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
	ctxt->src.val = old_eip;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
	if (rc != X86EMUL_CONTINUE)
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)

	ctxt->dst.val = ctxt->src.val;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	return edx & bit(X86_FEATURE_LM);
}

#define GET_SMSTATE(type, smbase, offset)				  \
	int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,	  \
	if (r != X86EMUL_CONTINUE)					  \
		return X86EMUL_UNHANDLEABLE;				  \
2352 static void rsm_set_desc_flags(struct desc_struct
*desc
, u32 flags
)
2354 desc
->g
= (flags
>> 23) & 1;
2355 desc
->d
= (flags
>> 22) & 1;
2356 desc
->l
= (flags
>> 21) & 1;
2357 desc
->avl
= (flags
>> 20) & 1;
2358 desc
->p
= (flags
>> 15) & 1;
2359 desc
->dpl
= (flags
>> 13) & 3;
2360 desc
->s
= (flags
>> 12) & 1;
2361 desc
->type
= (flags
>> 8) & 15;
2364 static int rsm_load_seg_32(struct x86_emulate_ctxt
*ctxt
, u64 smbase
, int n
)
2366 struct desc_struct desc
;
2370 selector
= GET_SMSTATE(u32
, smbase
, 0x7fa8 + n
* 4);
2373 offset
= 0x7f84 + n
* 12;
2375 offset
= 0x7f2c + (n
- 3) * 12;
2377 set_desc_base(&desc
, GET_SMSTATE(u32
, smbase
, offset
+ 8));
2378 set_desc_limit(&desc
, GET_SMSTATE(u32
, smbase
, offset
+ 4));
2379 rsm_set_desc_flags(&desc
, GET_SMSTATE(u32
, smbase
, offset
));
2380 ctxt
->ops
->set_segment(ctxt
, selector
, &desc
, 0, n
);
2381 return X86EMUL_CONTINUE
;
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smbase, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}
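/*
 * Illustrative summary (not in the original source), derived from the loads
 * above: in the 64-bit SMM state-save map each segment register occupies a
 * 16-byte record at smbase + 0x7e00 + n * 16, laid out as
 *
 *	+0  u16 selector
 *	+2  u16 attributes (shifted left by 8 to match rsm_set_desc_flags)
 *	+4  u32 limit
 *	+8  u32 base (low 32 bits)
 *	+12 u32 base (high 32 bits, "base3")
 */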
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr3, u64 cr4)
{
	int bad;
	u64 pcid;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}
	}

	return X86EMUL_CONTINUE;
}
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr3, cr4;
	int i;

	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
	cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smbase, 0x7fcc);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7fc8);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr3, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);

	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u32, smbase, 0x7f68);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7f60);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
	cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	u64 smbase;
	int ret;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG.  */
		if (cr4 & X86_CR4_PCIDE) {
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
			cr4 &= ~X86_CR4_PCIDE;
		}

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
	if (cr4 & X86_CR4_PAE)
		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

	/* And finally go back to 32-bit mode.  */
	efer = 0;
	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);

	smbase = ctxt->ops->get_smbase(ctxt);

	/*
	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
	 * state-save area.
	 */
	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
		return X86EMUL_UNHANDLEABLE;

	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
	else
		ret = rsm_load_state_32(ctxt, smbase + 0x8000);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
	return X86EMUL_CONTINUE;
}
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax = 0x00000000, ebx, ecx = 0, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64bit
	 * longmode. Also a 64bit guest with a
	 * 32bit compat-app running will #UD !! While this
	 * behaviour can be fixed (by emulating) into AMD
	 * response - CPUs of AMD can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* Hygon ("HygonGenuine") */
	if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
		return true;

	/*
	 * default: (not Intel, not AMD, not Hygon), apply Intel's
	 * stricter rules...
	 */
	return false;
}
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;

	return X86EMUL_CONTINUE;
}
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (emul_is_noncanonical_address(rcx, ctxt) ||
		    emul_is_noncanonical_address(rdx, ctxt))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}
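/*
 * Illustrative worked example (not in the original source): with
 * EFLAGS.IOPL = 1 (bits 13:12 = 01) and the guest running at CPL 3, the
 * expression above evaluates 3 > 1, so the I/O instruction is "bad" for
 * IOPL purposes and must fall back to the TSS permission-bitmap check in
 * emulator_io_port_access_allowed() below.
 */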
#define VMWARE_PORT_VMPORT	(0x5658)
#define VMWARE_PORT_VMRPC	(0x5659)

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	/*
	 * VMware allows access to these ports even if denied
	 * by TSS I/O permission bitmap. Mimic behavior.
	 */
	if (enable_vmware_backdoor &&
	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
		return true;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
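/*
 * Illustrative arithmetic (not in the original source): for a 2-byte "in"
 * from port 0x3f9, port/8 = 0x7f selects the bitmap byte, bit_idx = 1 and
 * mask = 0b11, so the 16-bit word read at base + io_bitmap_ptr + 0x7f is
 * shifted right by one and the low two bits must both be clear for the
 * access to be allowed; any set bit in the covered range denies the whole
 * access.
 */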
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}
static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
				  ldt_sel_offset - eip_offset);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr, dr7;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

	return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
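/*
 * Illustrative worked example (not in the original source): for the usual
 * base-10 form of AAD with AH = 3 and AL = 7, the computation above yields
 * AL = (7 + 3 * 10) & 0xff = 37 and clears AH, i.e. the two unpacked BCD
 * digits are folded into a single binary value before a divide.
 */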
static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	enum x86emul_mode prev_mode = ctxt->mode;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but the very least we should
	   restore cs */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}
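/*
 * Illustrative arithmetic (not in the original source): the expression
 * above broadcasts the sign bit of the source into RDX/EDX/DX.  For a
 * 16-bit CWD with AX = 0x8000, src.val >> 15 is 1, subtracting 1 gives 0
 * and the complement is all ones, so DX becomes 0xffff; for AX = 0x1234
 * the shift yields 0, the subtraction gives ~0 and the complement is 0.
 */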
static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc_aux = 0;

	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
		return emulate_gp(ctxt, 0);
	ctxt->dst.val = tsc_aux;
	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}
static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
{
	if (segment > VCPU_SREG_GS &&
	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	ctxt->dst.val = get_segment_selector(ctxt, segment);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	return em_store_sreg(ctxt, ctxt->modrm_reg);
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
static int em_sldt(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_str(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_TR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
				   &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}
static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}
static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;
	u64 msr = 0;

	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
	    ctxt->ops->cpl(ctxt)) {
		return emulate_gp(ctxt, 0);
	}

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}
static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}
static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}
static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
	u32 eax = 1, ebx, ecx = 0, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	if (!(edx & FFL(FXSR)))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	return X86EMUL_CONTINUE;
}

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}
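/*
 * Illustrative arithmetic (not in the original source): xmm_space starts
 * 160 bytes into struct fxregs_state, so with CR4.OSFXSR clear only those
 * 160 header bytes (FPU state plus MXCSR) are copied; with it set in
 * 32-bit mode 160 + 8 * 16 = 288 bytes cover XMM0-7, and the 64-bit case
 * uses 160 + 16 * 16 = 416 bytes for XMM0-15.
 */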
/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       FIP and FDP.
 *  3) 64-bit mode with REX.W prefix
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode without REX.W prefix
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
}
/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * in the host registers (via FXSAVE) instead, so they won't be modified.
 * (preemption has to stay disabled until FXRSTOR).
 *
 * Use noinline to keep the stack for other functions called by callers small.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
	size_t size;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	size = fxstate_size(ctxt);
	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (size < __fxstate_size(16)) {
		rc = fxregs_fixup(&fx_state, size);
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}

	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
	return rc;
}
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA) {
			u64 maxphyaddr;
			u32 eax, ebx, ecx, edx;

			eax = 0x80000008;
			ecx = 0;
			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
						 &edx, false))
				maxphyaddr = eax & 0xff;
			else
				maxphyaddr = 36;
			rsvd = rsvd_bits(maxphyaddr, 63);
			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
				rsvd &= ~X86_CR3_PCID_NOFLUSH;
		}

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~DR_TRAP_BITS;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer = 0;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	/*
	 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
	 * in Ring3 when CR4.PCE=0.
	 */
	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
		return X86EMUL_CONTINUE;

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
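/*
 * Illustrative expansion (not in the original source): an entry written as
 * F6ALU(Lock, em_add) in the opcode table expands to six struct opcode
 * initializers covering the byte and full-width forms of
 * "add r/m,reg", "add reg,r/m" and "add al/ax/eax,imm"; the first of them
 * is equivalent to
 *
 *	{ .flags = Lock | DstMem | SrcReg | ModRM | ByteOp | Fastop,
 *	  .u.fastop = em_add }
 *
 * so one macro line fills a whole row of the one-byte opcode map.
 */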
4371 static const struct opcode group7_rm0
[] = {
4373 I(SrcNone
| Priv
| EmulateOnUD
, em_hypercall
),
4377 static const struct opcode group7_rm1
[] = {
4378 DI(SrcNone
| Priv
, monitor
),
4379 DI(SrcNone
| Priv
, mwait
),
4383 static const struct opcode group7_rm3
[] = {
4384 DIP(SrcNone
| Prot
| Priv
, vmrun
, check_svme_pa
),
4385 II(SrcNone
| Prot
| EmulateOnUD
, em_hypercall
, vmmcall
),
4386 DIP(SrcNone
| Prot
| Priv
, vmload
, check_svme_pa
),
4387 DIP(SrcNone
| Prot
| Priv
, vmsave
, check_svme_pa
),
4388 DIP(SrcNone
| Prot
| Priv
, stgi
, check_svme
),
4389 DIP(SrcNone
| Prot
| Priv
, clgi
, check_svme
),
4390 DIP(SrcNone
| Prot
| Priv
, skinit
, check_svme
),
4391 DIP(SrcNone
| Prot
| Priv
, invlpga
, check_svme
),
4394 static const struct opcode group7_rm7
[] = {
4396 DIP(SrcNone
, rdtscp
, check_rdtsc
),
4400 static const struct opcode group1
[] = {
4402 F(Lock
| PageTable
, em_or
),
4405 F(Lock
| PageTable
, em_and
),
4411 static const struct opcode group1A
[] = {
4412 I(DstMem
| SrcNone
| Mov
| Stack
| IncSP
| TwoMemOp
, em_pop
), N
, N
, N
, N
, N
, N
, N
,
4415 static const struct opcode group2
[] = {
4416 F(DstMem
| ModRM
, em_rol
),
4417 F(DstMem
| ModRM
, em_ror
),
4418 F(DstMem
| ModRM
, em_rcl
),
4419 F(DstMem
| ModRM
, em_rcr
),
4420 F(DstMem
| ModRM
, em_shl
),
4421 F(DstMem
| ModRM
, em_shr
),
4422 F(DstMem
| ModRM
, em_shl
),
4423 F(DstMem
| ModRM
, em_sar
),
4426 static const struct opcode group3
[] = {
4427 F(DstMem
| SrcImm
| NoWrite
, em_test
),
4428 F(DstMem
| SrcImm
| NoWrite
, em_test
),
4429 F(DstMem
| SrcNone
| Lock
, em_not
),
4430 F(DstMem
| SrcNone
| Lock
, em_neg
),
4431 F(DstXacc
| Src2Mem
, em_mul_ex
),
4432 F(DstXacc
| Src2Mem
, em_imul_ex
),
4433 F(DstXacc
| Src2Mem
, em_div_ex
),
4434 F(DstXacc
| Src2Mem
, em_idiv_ex
),
4437 static const struct opcode group4
[] = {
4438 F(ByteOp
| DstMem
| SrcNone
| Lock
, em_inc
),
4439 F(ByteOp
| DstMem
| SrcNone
| Lock
, em_dec
),
4443 static const struct opcode group5
[] = {
4444 F(DstMem
| SrcNone
| Lock
, em_inc
),
4445 F(DstMem
| SrcNone
| Lock
, em_dec
),
4446 I(SrcMem
| NearBranch
, em_call_near_abs
),
4447 I(SrcMemFAddr
| ImplicitOps
, em_call_far
),
4448 I(SrcMem
| NearBranch
, em_jmp_abs
),
4449 I(SrcMemFAddr
| ImplicitOps
, em_jmp_far
),
4450 I(SrcMem
| Stack
| TwoMemOp
, em_push
), D(Undefined
),
4453 static const struct opcode group6
[] = {
4454 II(Prot
| DstMem
, em_sldt
, sldt
),
4455 II(Prot
| DstMem
, em_str
, str
),
4456 II(Prot
| Priv
| SrcMem16
, em_lldt
, lldt
),
4457 II(Prot
| Priv
| SrcMem16
, em_ltr
, ltr
),
4461 static const struct group_dual group7
= { {
4462 II(Mov
| DstMem
, em_sgdt
, sgdt
),
4463 II(Mov
| DstMem
, em_sidt
, sidt
),
4464 II(SrcMem
| Priv
, em_lgdt
, lgdt
),
4465 II(SrcMem
| Priv
, em_lidt
, lidt
),
4466 II(SrcNone
| DstMem
| Mov
, em_smsw
, smsw
), N
,
4467 II(SrcMem16
| Mov
| Priv
, em_lmsw
, lmsw
),
4468 II(SrcMem
| ByteOp
| Priv
| NoAccess
, em_invlpg
, invlpg
),
4472 N
, EXT(0, group7_rm3
),
4473 II(SrcNone
| DstMem
| Mov
, em_smsw
, smsw
), N
,
4474 II(SrcMem16
| Mov
| Priv
, em_lmsw
, lmsw
),
4478 static const struct opcode group8
[] = {
4480 F(DstMem
| SrcImmByte
| NoWrite
, em_bt
),
4481 F(DstMem
| SrcImmByte
| Lock
| PageTable
, em_bts
),
4482 F(DstMem
| SrcImmByte
| Lock
, em_btr
),
4483 F(DstMem
| SrcImmByte
| Lock
| PageTable
, em_btc
),
4487 * The "memory" destination is actually always a register, since we come
4488 * from the register case of group9.
4490 static const struct gprefix pfx_0f_c7_7
= {
4491 N
, N
, N
, II(DstMem
| ModRM
| Op3264
| EmulateOnUD
, em_rdpid
, rdtscp
),
4495 static const struct group_dual group9
= { {
4496 N
, I(DstMem64
| Lock
| PageTable
, em_cmpxchg8b
), N
, N
, N
, N
, N
, N
,
4498 N
, N
, N
, N
, N
, N
, N
,
4499 GP(0, &pfx_0f_c7_7
),
static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};
static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};
static const struct group_dual group15 = { {
	I(ModRM | Aligned16, em_fxsave),
	I(ModRM | Aligned16, em_fxrstor),
	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };
static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};
static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};
static const struct gprefix pfx_0f_10_0f_11 = {
	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
};
static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};
static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};
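/*
 * x87 escape opcodes (D9, DB, DD): the first array holds the memory forms
 * (indexed by ModRM.reg), the second the register forms (ModRM 0xC0-0xFF).
 * Only FNSTCW, FNINIT and FNSTSW are emulated; every other slot is N.
 */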
static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
} };
static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
} };
static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,
} };
static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};
static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};
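/*
 * Primary opcode table, indexed by the first (non-prefix) opcode byte.
 * Each entry packs the decode flags (operand kinds, Lock/Priv/Stack, ...)
 * together with the emulation callback and optional intercept/permission
 * hooks, using the D/I/II/F/G/GD/GP/E/ID/MD macros defined earlier.
 */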
static const struct opcode opcode_table[256] = {
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	F6ALU(Lock | PageTable, em_and), N, N,
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	F6ALU(Lock, em_xor), N, N,
	F6ALU(NoWrite, em_cmp), N, N,
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	X8(I(SrcReg | Stack, em_push)),
	X8(I(DstReg | Stack, em_pop)),
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	X16(D(SrcImmByte | NearBranch)),
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
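/*
 * Two-byte opcode table, indexed by the opcode byte that follows the
 * 0x0f escape.
 */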
static const struct opcode twobyte_table[256] = {
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
	    check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
	    check_dr_write),
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N, N, N, N, N, N, N,
	X16(D(DstReg | SrcMem | ModRM)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	X16(D(SrcImm | NearBranch)),
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	X8(I(DstReg, em_bswap)),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
};
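/*
 * 0F 38 F0/F1 are MOVBE (load and store forms).  Only the unprefixed
 * encoding is wired up below; the prefixed variants (e.g. F2, which is
 * CRC32) decode as N and are not emulated.
 */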
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};
/*
 * Insns below are indexed by the third opcode byte and then selected by
 * the SIMD prefix.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N),
};
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
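/*
 * Materialize a single operand from the OpXXX selector that was packed
 * into ctxt->d at decode time (one call each for src, src2 and dst).
 */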
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}
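/*
 * Main decode entry point: consume legacy and REX prefixes, look the
 * opcode up in the tables above, resolve group/prefix/escape/dual
 * indirections, then decode ModRM/SIB and the up-to-three operands.
 */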
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	u16 dummy;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
		      No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}
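/*
 * Before emulating an MMX instruction, execute fwait so that any pending
 * x87 exception surfaces now and can be reflected to the guest as #MF
 * rather than being raised in the middle of emulation.
 */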
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = asm_safe("fwait");

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}
static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
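/*
 * Dispatch into a fastop stub.  The 1/2/4/8-byte variants of each ALU
 * operation are laid out FASTOP_SIZE bytes apart, so for non-byte ops the
 * destination width selects the stub.  Guest flags are installed around
 * the retpoline-safe indirect call and read back afterwards; the division
 * stubs report #DE by clearing the fop register (see the !fop check).
 */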
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}
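/*
 * Execute one previously decoded instruction: run the privilege, mode and
 * intercept checks, fetch memory operands, invoke the execute/fastop
 * callback (or one of the open-coded cases below), write results back and
 * advance RIP, restarting instead when a REP string op has iterations left.
 */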
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instruction )
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
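/*
 * The GPA that a page fault handed us can only be reused when the
 * instruction touches a single memory operand; REP string ops and
 * TwoMemOp instructions may access a second address, so they force a
 * fresh translation.
 */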
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)