/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#ifndef __KERNEL__
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define GroupMask   0xff	/* Group number stored in bits 0:7 */
/* Misc flags */
#define No64	    (1<<28)
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm16   (4<<29)
#define Src2Mem16   (5<<29)	/* Used for Ep encoding. First argument has to be
				   in memory and second argument is located
				   immediately after the first one in memory. */
#define Src2Mask    (7<<29)
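
/*
 * Illustrative sketch (not part of the emulator): how a decode-table entry
 * built from the flags above is pulled apart with the masks. The sample
 * entry value is hypothetical.
 */
#if 0
	u32 d = ByteOp | DstMem | SrcReg | ModRM | Lock;

	(d & ByteOp) != 0;		/* true: 8-bit operands */
	(d & DstMask) == DstMem;	/* destination is memory */
	(d & SrcMask) == SrcReg;	/* source is a register */
	(d & ModRM) != 0;		/* a ModRM byte follows the opcode */
#endif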
enum {
	Group1_80, Group1_81, Group1_82, Group1_83,
	Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
	Group8, Group9,
};
static u32 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	0, 0,
	/* 0x40 - 0x47 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x48 - 0x4F */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x50 - 0x57 */
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	/* 0x58 - 0x5F */
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	/* 0x60 - 0x67 */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0,
	/* 0x68 - 0x6F */
	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */
	/* 0x70 - 0x77 */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x78 - 0x7F */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x80 - 0x87 */
	Group | Group1_80, Group | Group1_81,
	Group | Group1_82, Group | Group1_83,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
	DstReg | SrcMem | ModRM | Mov, Group | Group1A,
	/* 0x90 - 0x97 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x98 - 0x9F */
	0, 0, SrcImm | Src2Imm16 | No64, 0,
	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
	ByteOp | ImplicitOps | String, ImplicitOps | String,
	/* 0xA8 - 0xAF */
	0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
	ByteOp | ImplicitOps | String, ImplicitOps | String,
	/* 0xB0 - 0xB7 */
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	/* 0xB8 - 0xBF */
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	0, ImplicitOps | Stack, 0, 0,
	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, ImplicitOps | Stack,
	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xE7 */
	0, 0, 0, 0,
	ByteOp | SrcImmUByte, SrcImmUByte,
	ByteOp | SrcImmUByte, SrcImmUByte,
	/* 0xE8 - 0xEF */
	SrcImm | Stack, SrcImm | ImplicitOps,
	SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
	/* 0xF8 - 0xFF */
	ImplicitOps, 0, ImplicitOps, ImplicitOps,
	ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
};
static u32 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, Group | GroupDual | Group7, 0, 0,
	0, ImplicitOps, ImplicitOps | Priv, 0,
	ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
	0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
	ImplicitOps, ImplicitOps | Priv, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x47 */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x48 - 0x4F */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
	/* 0xA8 - 0xAF */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM,
	ModRM, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0,
	Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xCF */
	0, 0, 0, DstMem | SrcReg | ModRM | Mov,
	0, 0, 0, Group | GroupDual | Group9,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static u32 group_table[] = {
	[Group1_80*8] =
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM,
	[Group1_81*8] =
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM,
	[Group1_82*8] =
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64,
	[Group1_83*8] =
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM,
	[Group1A*8] =
	DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
	[Group3_Byte*8] =
	ByteOp | SrcImm | DstMem | ModRM, 0,
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group3*8] =
	DstMem | SrcImm | ModRM, 0,
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group4*8] =
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0, 0, 0,
	[Group5*8] =
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	SrcMem | ModRM | Stack, 0,
	SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
	SrcMem | ModRM | Stack, 0,
	[Group7*8] =
	0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
	[Group8*8] =
	0, 0, 0, 0,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
	[Group9*8] =
	0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
	[Group7*8] =
	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, 0,
	[Group9*8] =
	0, 0, 0, 0, 0, 0, 0, 0,
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
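
/*
 * Illustrative sketch (not part of the emulator): extracting fields from a
 * saved EFLAGS value with the masks above. The sample value is made up.
 */
#if 0
	unsigned long eflags = 0x3246;		/* hypothetical */
	int iopl = (eflags & EFLG_IOPL) >> 12;	/* bits 13:12 -> 3 */
	int zf   = !!(eflags & EFLG_ZF);	/* 1: zero flag is set */
#endif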
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */	\
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)					\
	/* _sav |= EFLAGS & _msk; */					\
	"pushf; "							\
	"pop  %"_LO32 _tmp"; "						\
	"andl %"_msk",%"_LO32 _tmp"; "					\
	"orl  %"_LO32 _tmp",%"_sav"; "

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
		switch ((_dst).bytes) {					\
		case 1:							\
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
			break;						\
		default:						\
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	\
					     _wx, _wy, _lx, _ly, _qx, _qy); \
			break;						\
		}							\
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")

/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)

#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)
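
/*
 * Illustrative sketch (not part of the emulator): how the dispatch macros
 * above are typically invoked from the big opcode switch further down;
 * 'c' and 'ctxt' are the usual decode cache and context pointers.
 */
#if 0
	emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); /* ADD r/m,r */
	emulate_1op("inc", c->dst, ctxt->eflags);		/* INC r/m  */
#endif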
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}
static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
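
/*
 * Illustrative sketch (not part of the emulator): with ad_bytes == 2 the
 * update above wraps inside the low 16 bits only, so a 16-bit SI of 0xffff
 * incremented by 1 becomes 0x0000 while the upper register bits survive.
 * The value below is made up.
 */
#if 0
	unsigned long reg = 0x1234ffff;	/* hypothetical RSI */
	/* with ad_mask(c) == 0xffff: */
	reg = (reg & ~0xffffUL) | ((reg + 1) & 0xffffUL);	/* 0x12340000 */
#endif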
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	/* Segment bases are zero in 64-bit mode, except for FS and GS. */
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_SS);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long linear, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size;

	if (linear < fc->start || linear >= fc->end) {
		size = min(15UL, PAGE_SIZE - offset_in_page(linear));
		rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->start = linear;
		fc->end = linear + size;
	}
	*dest = fc->data[linear - fc->start];
	return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	eip += ctxt->cs_base;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
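
/*
 * Illustrative arithmetic (not part of the emulator): the 15-byte check
 * above rejects fetches past the architectural instruction-length limit.
 * Sample numbers are made up: with ctxt->eip = 0x1000 and a request for
 * 4 bytes at eip = 0x100e, 0x100e + 4 - 0x1000 = 18 > 15, so the fetch
 * fails with X86EMUL_UNHANDLEABLE.
 */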
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
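
/*
 * Illustrative sketch (not part of the emulator): with highbyte_regs set,
 * ModRM reg values 4..7 select AH/CH/DH/BH, i.e. byte 1 of the low four
 * GPRs. For example, reg = 4 (AH) yields byte 1 of regs[VCPU_REGS_RAX].
 */
#if 0
	u8 *ah = (u8 *)&regs[4 & 3] + 1;	/* second byte of RAX */
#endif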
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   void *ptr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
			   ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
			   ctxt->vcpu, NULL);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
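
/*
 * Illustrative sketch (not part of the emulator): evaluating JNZ (0x75).
 * (0x75 & 15) >> 1 == 2 selects the ZF test; the odd low bit inverts the
 * sense, so the branch is taken exactly when ZF is clear.
 */
#if 0
	test_cc(0x75, flags);	/* == !(flags & EFLG_ZF) as a 0/1 value */
#endif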
static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->ptr = decode_register(reg, c->regs, highbyte_regs);
		op->val = *(u8 *)op->ptr;
		op->bytes = 1;
	} else {
		op->ptr = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
		switch (op->bytes) {
		case 2:
			op->val = *(u16 *)op->ptr;
			break;
		case 4:
			op->val = *(u32 *)op->ptr;
			break;
		case 8:
			op->val = *(u64 *) op->ptr;
			break;
		}
	}
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REG.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_ea = 0;
	c->use_modrm_ea = 1;

	if (c->modrm_mod == 3) {
		c->modrm_ptr = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		c->modrm_val = *(unsigned long *)c->modrm_ptr;
		return rc;
	}

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			c->modrm_ea += bx + si;
			break;
		case 1:
			c->modrm_ea += bx + di;
			break;
		case 2:
			c->modrm_ea += bp + si;
			break;
		case 3:
			c->modrm_ea += bp + di;
			break;
		case 4:
			c->modrm_ea += si;
			break;
		case 5:
			c->modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				c->modrm_ea += bp;
			break;
		case 7:
			c->modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			if (!c->has_seg_override)
				set_seg_override(c, VCPU_SREG_SS);
		c->modrm_ea = (u16)c->modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				c->modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				c->modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			c->modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
done:
	return rc;
}
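
/*
 * Illustrative sketch (not part of the emulator): 16-bit effective-address
 * decode for "mov ax, [bp+si+0x10]" (ModRM 0x52: mod=01, rm=2). mod=1 adds
 * the sign-extended disp8 0x10, rm=2 selects bp+si, and the result is
 * truncated: modrm_ea = (u16)(bp + si + 0x10), with an implicit SS
 * segment override because BP is involved.
 */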
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->ad_bytes) {
	case 2:
		c->modrm_ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		c->modrm_ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		c->modrm_ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, group;

	/* Shadow copy of register state. Committed on successful emulation. */

	memset(c, 0, sizeof(struct decode_cache));
	ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	c->d = opcode_table[c->b];
	if (c->d == 0) {
		/* Two-byte opcode? */
		if (c->b == 0x0f) {
			c->twobyte = 1;
			c->b = insn_fetch(u8, 1, c->eip);
			c->d = twobyte_table[c->b];
		}
	}

	if (c->d & Group) {
		group = c->d & GroupMask;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		group = (group << 3) + ((c->modrm >> 3) & 7);
		if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
			c->d = group2_table[group];
		else
			c->d = group_table[group];
	}

	/* Unrecognised? */
	if (c->d == 0) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM)
		rc = decode_modrm(ctxt, ops);
	else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (!(!c->twobyte && c->b == 0x8d))
		c->modrm_ea += seg_override_base(ctxt, c);

	if (c->ad_bytes != 8)
		c->modrm_ea = (u32)c->modrm_ea;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		c->src.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		c->src.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		/* Don't fetch the address for invlpg: it could be unmapped. */
		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
			break;
	srcmem_common:
		/*
		 * For instructions with a ModR/M byte, switch to register
		 * access if Mod = 3.
		 */
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->src.type = OP_REG;
			c->src.val = c->modrm_val;
			c->src.ptr = c->modrm_ptr;
			break;
		}
		c->src.type = OP_MEM;
		break;
	case SrcImm:
	case SrcImmU:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->src.bytes == 8)
			c->src.bytes = 4;
		/* NB. Immediates are sign-extended as necessary. */
		switch (c->src.bytes) {
		case 1:
			c->src.val = insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->src.val = insn_fetch(s16, 2, c->eip);
			break;
		case 4:
			c->src.val = insn_fetch(s32, 4, c->eip);
			break;
		}
		if ((c->d & SrcMask) == SrcImmU) {
			switch (c->src.bytes) {
			case 1:
				c->src.val &= 0xff;
				break;
			case 2:
				c->src.val &= 0xffff;
				break;
			case 4:
				c->src.val &= 0xffffffff;
				break;
			}
		}
		break;
	case SrcImmByte:
	case SrcImmUByte:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = 1;
		if ((c->d & SrcMask) == SrcImmByte)
			c->src.val = insn_fetch(s8, 1, c->eip);
		else
			c->src.val = insn_fetch(u8, 1, c->eip);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	}

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
		break;
	case Src2ImmByte:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 1;
		c->src2.val = insn_fetch(u8, 1, c->eip);
		break;
	case Src2Imm16:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 2;
		c->src2.val = insn_fetch(u16, 2, c->eip);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Mem16:
		c->src2.bytes = 2;
		c->src2.type = OP_MEM;
		break;
	}

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
		return 0;
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstMem:
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
			c->dst.type = OP_REG;
			c->dst.val = c->dst.orig_val = c->modrm_val;
			c->dst.ptr = c->modrm_ptr;
			break;
		}
		c->dst.type = OP_MEM;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->dst.bytes) {
		case 1:
			c->dst.val = *(u8 *)c->dst.ptr;
			break;
		case 2:
			c->dst.val = *(u16 *)c->dst.ptr;
			break;
		case 4:
			c->dst.val = *(u32 *)c->dst.ptr;
			break;
		case 8:
			c->dst.val = *(u64 *)c->dst.ptr;
			break;
		}
		c->dst.orig_val = c->dst.val;
		break;
	}

	if (c->rip_relative)
		c->modrm_ea += c->eip;

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.ptr = (void *) register_address(c, ss_base(ctxt),
					       c->regs[VCPU_REGS_RSP]);
}
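
/*
 * Illustrative arithmetic (not part of the emulator): a 4-byte push first
 * decrements RSP by op_bytes and then targets SS:RSP, so with a made-up
 * RSP of 0x2000 the write lands at ss_base + 0x1ffc.
 */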
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = ops->read_emulated(register_address(c, ss_base(ctxt),
						 c->regs[VCPU_REGS_RSP]),
				dest, len, ctxt->vcpu);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment segment;

	kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);

	c->src.val = segment.selector;
	emulate_push(ctxt);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
	return rc;
}

static void emulate_pusha(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt);
		++reg;
	}
}

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
						   c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}

static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	default:
		DPRINTF("Cannot emulate %02x\n", c->b);
		rc = X86EMUL_UNHANDLEABLE;
		break;
	}
	return rc;
}

static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt);
		break;
	}
	return X86EMUL_CONTINUE;
}

static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops,
			       unsigned long memop)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old, new;
	int rc;

	rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {

		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;

	} else {
		new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
		      (u32) c->regs[VCPU_REGS_RBX];

		rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;

	switch (c->dst.type) {
	case OP_REG:
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *)c->dst.ptr = (u8)c->dst.val;
			break;
		case 2:
			*(u16 *)c->dst.ptr = (u16)c->dst.val;
			break;
		case 4:
			*c->dst.ptr = (u32)c->dst.val;
			break;	/* 64b: zero-ext */
		case 8:
			*c->dst.ptr = c->dst.val;
			break;
		}
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
	/*
	 * An sti; sti; sequence only disables interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction was an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss.
	 */
	if (!(int_shadow & mask))
		ctxt->interruptibility = mask;
}

static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct kvm_segment *cs, struct kvm_segment *ss)
{
	memset(cs, 0, sizeof(struct kvm_segment));
	kvm_x86_ops->get_segment(ctxt->vcpu, cs, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct kvm_segment));

	cs->l = 0;		/* will be adjusted later */
	cs->base = 0;		/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	cs->limit = 0xffffffff;	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->present = 1;
	cs->db = 1;

	ss->unusable = 0;
	ss->base = 0;		/* flat segment */
	ss->limit = 0xffffffff;	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->db = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->present = 1;
}

static int
emulate_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs.selector = (u16)(msr_data & 0xfffc);
	ss.selector = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.db = 0;
		cs.l = 1;
	}
	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		kvm_x86_ops->get_msr(ctxt->vcpu,
				     ctxt->mode == X86EMUL_MODE_PROT64 ?
				     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs.selector = (u16)msr_data;
	cs.selector &= ~SELECTOR_RPL_MASK;
	ss.selector = cs.selector + 8;
	ss.selector &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
	    || is_long_mode(ctxt->vcpu)) {
		cs.db = 0;
		cs.l = 1;
	}

	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;
	int usermode;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs.selector = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss.selector = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs.selector = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss.selector = cs.selector + 8;
		cs.db = 0;
		cs.l = 1;
		break;
	}
	cs.selector |= SELECTOR_RPL_MASK;
	ss.selector |= SELECTOR_RPL_MASK;

	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct kvm_segment tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
	if (tr_seg.unusable)
		return false;
	if (tr_seg.limit < 103)
		return false;
	r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
			  NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > tr_seg.limit)
		return false;
	r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;
	return true;
}
1819 x86_emulate_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
1821 unsigned long memop
= 0;
1823 unsigned long saved_eip
= 0;
1824 struct decode_cache
*c
= &ctxt
->decode
;
1827 int rc
= X86EMUL_CONTINUE
;
1829 ctxt
->interruptibility
= 0;
1831 /* Shadow copy of register state. Committed on successful emulation.
1832 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
1836 memcpy(c
->regs
, ctxt
->vcpu
->arch
.regs
, sizeof c
->regs
);
1839 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&& (c
->d
& No64
)) {
1840 kvm_queue_exception(ctxt
->vcpu
, UD_VECTOR
);
1844 /* LOCK prefix is allowed only with some instructions */
1845 if (c
->lock_prefix
&& !(c
->d
& Lock
)) {
1846 kvm_queue_exception(ctxt
->vcpu
, UD_VECTOR
);
1850 /* Privileged instruction can be executed only in CPL=0 */
1851 if ((c
->d
& Priv
) && ops
->cpl(ctxt
->vcpu
)) {
1852 kvm_inject_gp(ctxt
->vcpu
, 0);
1856 if (((c
->d
& ModRM
) && (c
->modrm_mod
!= 3)) || (c
->d
& MemAbs
))
1857 memop
= c
->modrm_ea
;
1859 if (c
->rep_prefix
&& (c
->d
& String
)) {
1860 /* All REP prefixes have the same first termination condition */
1861 if (address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) == 0) {
1862 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1865 /* The second termination condition only applies for REPE
1866 * and REPNE. Test if the repeat string operation prefix is
1867 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
1868 * corresponding termination condition according to:
1869 * - if REPE/REPZ and ZF = 0 then done
1870 * - if REPNE/REPNZ and ZF = 1 then done
1872 if ((c
->b
== 0xa6) || (c
->b
== 0xa7) ||
1873 (c
->b
== 0xae) || (c
->b
== 0xaf)) {
1874 if ((c
->rep_prefix
== REPE_PREFIX
) &&
1875 ((ctxt
->eflags
& EFLG_ZF
) == 0)) {
1876 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1879 if ((c
->rep_prefix
== REPNE_PREFIX
) &&
1880 ((ctxt
->eflags
& EFLG_ZF
) == EFLG_ZF
)) {
1881 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1885 register_address_increment(c
, &c
->regs
[VCPU_REGS_RCX
], -1);
1889 if (c
->src
.type
== OP_MEM
) {
1890 c
->src
.ptr
= (unsigned long *)memop
;
1892 rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
1896 if (rc
!= X86EMUL_CONTINUE
)
1898 c
->src
.orig_val
= c
->src
.val
;
1901 if (c
->src2
.type
== OP_MEM
) {
1902 c
->src2
.ptr
= (unsigned long *)(memop
+ c
->src
.bytes
);
1904 rc
= ops
->read_emulated((unsigned long)c
->src2
.ptr
,
1908 if (rc
!= X86EMUL_CONTINUE
)
1912 if ((c
->d
& DstMask
) == ImplicitOps
)
1916 if (c
->dst
.type
== OP_MEM
) {
1917 c
->dst
.ptr
= (unsigned long *)memop
;
1918 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1921 unsigned long mask
= ~(c
->dst
.bytes
* 8 - 1);
1923 c
->dst
.ptr
= (void *)c
->dst
.ptr
+
1924 (c
->src
.val
& mask
) / 8;
1926 if (!(c
->d
& Mov
)) {
1927 /* optimisation - avoid slow emulated read */
1928 rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
1932 if (rc
!= X86EMUL_CONTINUE
)
1936 c
->dst
.orig_val
= c
->dst
.val
;
1946 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
1948 case 0x06: /* push es */
1949 emulate_push_sreg(ctxt
, VCPU_SREG_ES
);
1951 case 0x07: /* pop es */
1952 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
1953 if (rc
!= X86EMUL_CONTINUE
)
1958 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
1960 case 0x0e: /* push cs */
1961 emulate_push_sreg(ctxt
, VCPU_SREG_CS
);
1965 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
1967 case 0x16: /* push ss */
1968 emulate_push_sreg(ctxt
, VCPU_SREG_SS
);
1970 case 0x17: /* pop ss */
1971 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
1972 if (rc
!= X86EMUL_CONTINUE
)
1977 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
1979 case 0x1e: /* push ds */
1980 emulate_push_sreg(ctxt
, VCPU_SREG_DS
);
1982 case 0x1f: /* pop ds */
1983 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
1984 if (rc
!= X86EMUL_CONTINUE
)
1989 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
1993 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
1997 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
2001 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2003 case 0x40 ... 0x47: /* inc r16/r32 */
2004 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
2006 case 0x48 ... 0x4f: /* dec r16/r32 */
2007 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
2009 case 0x50 ... 0x57: /* push reg */
2012 case 0x58 ... 0x5f: /* pop reg */
2014 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
2015 if (rc
!= X86EMUL_CONTINUE
)
2018 case 0x60: /* pusha */
2019 emulate_pusha(ctxt
);
2021 case 0x61: /* popa */
2022 rc
= emulate_popa(ctxt
, ops
);
2023 if (rc
!= X86EMUL_CONTINUE
)
2026 case 0x63: /* movsxd */
2027 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
)
2028 goto cannot_emulate
;
2029 c
->dst
.val
= (s32
) c
->src
.val
;
2031 case 0x68: /* push imm */
2032 case 0x6a: /* push imm8 */
2035 case 0x6c: /* insb */
2036 case 0x6d: /* insw/insd */
2037 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2038 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
)) {
2039 kvm_inject_gp(ctxt
->vcpu
, 0);
2042 if (kvm_emulate_pio_string(ctxt
->vcpu
,
2044 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
2046 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
2047 (ctxt
->eflags
& EFLG_DF
),
2048 register_address(c
, es_base(ctxt
),
2049 c
->regs
[VCPU_REGS_RDI
]),
2051 c
->regs
[VCPU_REGS_RDX
]) == 0) {
2056 case 0x6e: /* outsb */
2057 case 0x6f: /* outsw/outsd */
2058 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2059 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
)) {
2060 kvm_inject_gp(ctxt
->vcpu
, 0);
2063 if (kvm_emulate_pio_string(ctxt
->vcpu
,
2065 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
2067 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
2068 (ctxt
->eflags
& EFLG_DF
),
2070 seg_override_base(ctxt
, c
),
2071 c
->regs
[VCPU_REGS_RSI
]),
2073 c
->regs
[VCPU_REGS_RDX
]) == 0) {
2078 case 0x70 ... 0x7f: /* jcc (short) */
2079 if (test_cc(c
->b
, ctxt
->eflags
))
2080 jmp_rel(c
, c
->src
.val
);
2082 case 0x80 ... 0x83: /* Grp1 */
2083 switch (c
->modrm_reg
) {
2103 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
2105 case 0x86 ... 0x87: /* xchg */
2107 /* Write back the register source. */
2108 switch (c
->dst
.bytes
) {
2110 *(u8
*) c
->src
.ptr
= (u8
) c
->dst
.val
;
2113 *(u16
*) c
->src
.ptr
= (u16
) c
->dst
.val
;
2116 *c
->src
.ptr
= (u32
) c
->dst
.val
;
2117 break; /* 64b reg: zero-extend */
2119 *c
->src
.ptr
= c
->dst
.val
;
2123 * Write back the memory destination with implicit LOCK
2126 c
->dst
.val
= c
->src
.val
;
2129 case 0x88 ... 0x8b: /* mov */
2131 case 0x8c: { /* mov r/m, sreg */
2132 struct kvm_segment segreg
;
2134 if (c
->modrm_reg
<= VCPU_SREG_GS
)
2135 kvm_get_segment(ctxt
->vcpu
, &segreg
, c
->modrm_reg
);
2137 kvm_queue_exception(ctxt
->vcpu
, UD_VECTOR
);
2140 c
->dst
.val
= segreg
.selector
;
2143 case 0x8d: /* lea r16/r32, m */
2144 c
->dst
.val
= c
->modrm_ea
;
2146 case 0x8e: { /* mov seg, r/m16 */
2151 if (c
->modrm_reg
== VCPU_SREG_CS
||
2152 c
->modrm_reg
> VCPU_SREG_GS
) {
2153 kvm_queue_exception(ctxt
->vcpu
, UD_VECTOR
);
2157 if (c
->modrm_reg
== VCPU_SREG_SS
)
2158 toggle_interruptibility(ctxt
, KVM_X86_SHADOW_INT_MOV_SS
);
2160 rc
= kvm_load_segment_descriptor(ctxt
->vcpu
, sel
, c
->modrm_reg
);
2162 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2165 case 0x8f: /* pop (sole member of Grp1a) */
2166 rc
= emulate_grp1a(ctxt
, ops
);
2167 if (rc
!= X86EMUL_CONTINUE
)
2170 case 0x90: /* nop / xchg r8,rax */
2171 if (!(c
->rex_prefix
& 1)) { /* nop */
2172 c
->dst
.type
= OP_NONE
;
2175 case 0x91 ... 0x97: /* xchg reg,rax */
2176 c
->src
.type
= c
->dst
.type
= OP_REG
;
2177 c
->src
.bytes
= c
->dst
.bytes
= c
->op_bytes
;
2178 c
->src
.ptr
= (unsigned long *) &c
->regs
[VCPU_REGS_RAX
];
2179 c
->src
.val
= *(c
->src
.ptr
);
2181 case 0x9c: /* pushf */
2182 c
->src
.val
= (unsigned long) ctxt
->eflags
;
2185 case 0x9d: /* popf */
2186 c
->dst
.type
= OP_REG
;
2187 c
->dst
.ptr
= (unsigned long *) &ctxt
->eflags
;
2188 c
->dst
.bytes
= c
->op_bytes
;
2189 rc
= emulate_popf(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
2190 if (rc
!= X86EMUL_CONTINUE
)
2193 case 0xa0 ... 0xa1: /* mov */
2194 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2195 c
->dst
.val
= c
->src
.val
;
2197 case 0xa2 ... 0xa3: /* mov */
2198 c
->dst
.val
= (unsigned long)c
->regs
[VCPU_REGS_RAX
];
2200 case 0xa4 ... 0xa5: /* movs */
2201 c
->dst
.type
= OP_MEM
;
2202 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2203 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2205 c
->regs
[VCPU_REGS_RDI
]);
2206 rc
= ops
->read_emulated(register_address(c
,
2207 seg_override_base(ctxt
, c
),
2208 c
->regs
[VCPU_REGS_RSI
]),
2210 c
->dst
.bytes
, ctxt
->vcpu
);
2211 if (rc
!= X86EMUL_CONTINUE
)
2213 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2214 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2216 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2217 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2220 case 0xa6 ... 0xa7: /* cmps */
2221 c
->src
.type
= OP_NONE
; /* Disable writeback. */
2222 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2223 c
->src
.ptr
= (unsigned long *)register_address(c
,
2224 seg_override_base(ctxt
, c
),
2225 c
->regs
[VCPU_REGS_RSI
]);
2226 rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
2230 if (rc
!= X86EMUL_CONTINUE
)
2233 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2234 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2235 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2237 c
->regs
[VCPU_REGS_RDI
]);
2238 rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
2242 if (rc
!= X86EMUL_CONTINUE
)
2245 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c
->src
.ptr
, c
->dst
.ptr
);
2247 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2249 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2250 (ctxt
->eflags
& EFLG_DF
) ? -c
->src
.bytes
2252 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2253 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2257 case 0xaa ... 0xab: /* stos */
2258 c
->dst
.type
= OP_MEM
;
2259 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2260 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2262 c
->regs
[VCPU_REGS_RDI
]);
2263 c
->dst
.val
= c
->regs
[VCPU_REGS_RAX
];
2264 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2265 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2268 case 0xac ... 0xad: /* lods */
2269 c
->dst
.type
= OP_REG
;
2270 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2271 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2272 rc
= ops
->read_emulated(register_address(c
,
2273 seg_override_base(ctxt
, c
),
2274 c
->regs
[VCPU_REGS_RSI
]),
2278 if (rc
!= X86EMUL_CONTINUE
)
2280 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2281 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2284 case 0xae ... 0xaf: /* scas */
2285 DPRINTF("Urk! I don't handle SCAS.\n");
2286 goto cannot_emulate
;
2287 case 0xb0 ... 0xbf: /* mov r, imm */
2292 case 0xc3: /* ret */
2293 c
->dst
.type
= OP_REG
;
2294 c
->dst
.ptr
= &c
->eip
;
2295 c
->dst
.bytes
= c
->op_bytes
;
2296 goto pop_instruction
;
2297 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
2299 c
->dst
.val
= c
->src
.val
;
	case 0xcb: /* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1: /* Grp2 */
		c->src.val = 1;
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3: /* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe4: /* inb */
	case 0xe5: /* in */
		port = c->src.val;
		io_dir_in = 1;
		goto do_io;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		port = c->src.val;
		io_dir_in = 0;
		goto do_io;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: /* jmp far */
	jump_far:
		if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
						VCPU_SREG_CS))
			goto done;

		c->eip = c->src.val;
		break;
	case 0xeb:
	jmp: /* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		port = c->regs[VCPU_REGS_RDX];
		io_dir_in = 1;
		goto do_io;
	case 0xee: /* out al,dx */
	case 0xef: /* out (e/r)ax,dx */
		port = c->regs[VCPU_REGS_RDX];
		io_dir_in = 0;
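	/*
	 * Both the immediate and DX-relative I/O forms funnel through
	 * do_io.  When CPL > IOPL, access is governed by the TSS I/O
	 * permission bitmap; emulator_io_permited() performs that check
	 * and a refused port raises #GP(0) in the guest.
	 */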
	do_io:
		if (!emulator_io_permited(ctxt, ops, port,
					  (c->d & ByteOp) ? 1 : c->op_bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
				    (c->d & ByteOp) ? 1 : c->op_bytes,
				    port) != 0) {
			c->eip = saved_eip;
			goto cannot_emulate;
		}
		break;
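	/*
	 * hlt only records the request; the vcpu is actually halted once
	 * control returns to the main KVM run loop.
	 */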
	case 0xf4: /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf6 ... 0xf7: /* Grp3 */
		rc = emulate_grp3(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			ctxt->eflags &= ~X86_EFLAGS_IF;
			c->dst.type = OP_NONE; /* Disable writeback. */
		}
		break;
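	/*
	 * sti additionally arms the one-instruction interrupt shadow
	 * (KVM_X86_SHADOW_INT_STI), so interrupts stay blocked until the
	 * instruction following sti has completed.
	 */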
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI);
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE; /* Disable writeback. */
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	}

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Commit shadow register state. */
	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
	kvm_rip_write(ctxt->vcpu, c->eip);
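	/*
	 * On an unhandleable instruction, restore the eip saved at entry
	 * so guest state is untouched before reporting the failure.
	 */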
done:
	if (rc == X86EMUL_UNHANDLEABLE) {
		c->eip = saved_eip;
		return -1;
	}
	return 0;

twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

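		/*
		 * kvm_fix_hypercall() patches the guest's vmcall/vmmcall
		 * bytes to the sequence native to the host vendor; the
		 * guest then re-executes the fixed instruction.
		 */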
		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = kvm_rip_read(ctxt->vcpu);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
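			/*
			 * For lgdt/lidt, read_descriptor() pulls the
			 * 16-bit limit and the base address of the
			 * pseudo-descriptor out of guest memory before
			 * the table register is loaded.
			 */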
			rc = read_descriptor(ctxt, ops, c->src.ptr,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.ptr,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
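		/*
		 * smsw stores only the low 16 bits of CR0; lmsw below may
		 * replace just its low four bits (PE, MP, EM, TS).
		 */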
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, memop);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05: /* syscall */
		rc = emulate_syscall(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06: /* clts */
		emulate_clts(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x08: /* invd */
	case 0x09: /* wbinvd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
		c->dst.type = OP_NONE;
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x22: /* mov reg, cr */
		ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		emulator_set_dr(ctxt, c->modrm_reg, c->regs[c->modrm_rm]);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
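	/*
	 * wrmsr/rdmsr move the 64-bit MSR value split across EDX:EAX,
	 * with ECX selecting the MSR; an access the host refuses is
	 * reflected back to the guest as #GP(0).
	 */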
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			c->eip = kvm_rip_read(ctxt->vcpu);
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x32:
		/* rdmsr */
		if (kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			c->eip = kvm_rip_read(ctxt->vcpu);
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x34: /* sysenter */
		rc = emulate_sysenter(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35: /* sysexit */
		rc = emulate_sysexit(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
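	/*
	 * cmov always performs the load; when the condition is false only
	 * the register writeback is suppressed, matching hardware, which
	 * reads the source operand unconditionally.
	 */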
	case 0x40 ... 0x4f: /* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;
		break;
	case 0xa0: /* push fs */
		emulate_push_sreg(ctxt, VCPU_SREG_FS);
		break;
	case 0xa1: /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
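	/*
	 * For bt/bts/btr/btc the bit offset is reduced modulo the operand
	 * width (dst.bytes << 3), i.e. the register-operand behaviour of
	 * the bit-test family.
	 */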
	case 0xa3:
	bt: /* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8: /* push gs */
		emulate_push_sreg(ctxt, VCPU_SREG_GS);
		break;
	case 0xa9: /* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	bts: /* bts */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae: /* clflush */
		break;
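	/*
	 * The flags for cmpxchg come from comparing EAX with the
	 * destination; on success the ordinary writeback path stores the
	 * saved source (using a locked cmpxchg when the instruction
	 * carried a lock prefix).
	 */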
	case 0xb0 ... 0xb1: /* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb3:
	btr: /* btr */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
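	/*
	 * Grp8 is the immediate-count form of the bit-test family; the
	 * low two bits of the modrm reg field pick bt/bts/btr/btc, so
	 * dispatch to the shared labels.
	 */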
	case 0xba: /* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	btc: /* btc */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbe ... 0xbf: /* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val
					     : (s16) c->src.val;
		break;
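	/*
	 * Under emulation movnti's non-temporal hint is meaningless; it
	 * degenerates to a plain store of the (possibly truncated)
	 * source.
	 */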
	case 0xc3: /* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val
						: (u64) c->src.val;
		break;
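	/*
	 * Grp9 with reg == 1 is cmpxchg8b: emulate_grp9() compares
	 * EDX:EAX with the 64-bit memory operand and either stores
	 * ECX:EBX or loads the old value, updating ZF accordingly.
	 */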
	case 0xc7: /* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops, memop);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->dst.type = OP_NONE;
		break;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);