/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
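
/*
 * Illustrative note: X4(x) expands to "x, x, x, x", so the opcode
 * tables later in this file can fill a run of identical entries in one
 * line, e.g. sixteen conditional-jump entries via a single X16(...).
 */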

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
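
/*
 * Illustrative sketch (an assumption about the fastop() dispatch code
 * that appears later in this file, not a definitive quote): the helper
 * declared below is expected to pick the stub for the current operand
 * size by offsetting into the run of FASTOP_SIZE-byte stubs, roughly
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * i.e. the 1/2/4/8-byte variants live at em_<op> + 0/8/16/24.
 */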

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC("em_" #op)

#define FOP_END \
	    ".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET

#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
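
/*
 * Illustrative expansion (not code that is compiled here): FASTOP2(add)
 * emits em_add as four FASTOP_SIZE-byte stubs,
 *
 *	em_add:        addb %dl,  %al   ; ret
 *	em_add + 8:    addw %dx,  %ax   ; ret
 *	em_add + 16:   addl %edx, %eax  ; ret
 *	em_add + 24:   addq %rdx, %rax  ; ret   (64-bit builds only)
 *
 * matching the dst/src register convention documented above.
 */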

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
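
/*
 * Illustrative note: each SETcc stub above is 4-byte aligned, so
 * test_cc() below reaches the stub for condition code cc simply as
 * em_setcc + 4 * cc, mirroring the order of the 0F 90..0F 9F opcodes.
 */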

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
	             "2:\n" \
	             ".pushsection .fixup, \"ax\"\n" \
	             "3: movl $1, %[_fault]\n" \
	             "   jmp  2b\n" \
	             ".popsection\n" \
	             _ASM_EXTABLE(1b, 3b) \
	             : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
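
/*
 * Usage sketch (an assumption about call sites elsewhere in this file):
 *
 *	rc = asm_safe("fwait");
 *	if (rc != X86EMUL_CONTINUE)
 *		return rc;
 *
 * i.e. a faulting instruction yields X86EMUL_UNHANDLEABLE instead of an
 * unhandled exception in the host.
 */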

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}
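
/*
 * Worked example (64-bit build): with *reg == 0xffffffffffffffffUL,
 * assign_register(reg, 0x11223344, 4) stores 0x0000000011223344 (a
 * 32-bit write zero-extends), while assign_register(reg, 0x3344, 2)
 * stores 0xffffffffffff3344 (narrower writes preserve the upper bits).
 */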

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
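
/*
 * e.g. a page-granular (g=1) descriptor with limit 0xfffff scales to
 * 0xffffffff, covering the full 4GiB address space.
 */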

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}
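
/*
 * e.g. a 16-byte MOVDQA access (Aligned) must sit on a 16-byte
 * boundary, MOVDQU (Unaligned) never faults on alignment, and accesses
 * below 16 bytes are exempt from the check altogether.
 */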

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing a
 * page boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	/* 15UL ^ cur_size equals 15 - cur_size, as cur_size never exceeds 15 */
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages, and one page has
	 * already been loaded at the beginning of x86_decode_insn.  So
	 * if there still are not enough bytes, we must have hit the
	 * 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
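
/*
 * Usage sketch: both macros assume a local 'int rc' and a 'done:' label
 * in the caller, e.g. (as in the ModR/M decode below)
 *
 *	modrm_ea += insn_fetch(u16, ctxt);
 *
 * which bails out through 'done' on a failed fetch.
 */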

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop selects byte-sized registers, in which case encodings 4..7
 * decode to AH,CH,DH,BH when there is no REX prefix.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
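
/*
 * Worked example: for a 32-bit "bt mem, reg" with a bit offset of 100,
 * mask is ~31, so sv = 96: the effective address is advanced by
 * 96 >> 3 = 12 bytes and the remaining in-word offset is 100 & 31 = 4.
 */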

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}
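
/*
 * Worked example: selector 0x10 has index 2 (and TI=0, so the GDT is
 * used); the descriptor is read from gdt.address + 2 * 8, after
 * checking that the table limit covers descriptor bytes 16..23.
 */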

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or the
		 * selector's RPL != CPL, or the descriptor's DPL != CPL
		 */
1687 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1688 goto exception;
1689 break;
1690 case VCPU_SREG_CS:
1691 if (!(seg_desc.type & 8))
1692 goto exception;
1693
1694 if (seg_desc.type & 4) {
1695 /* conforming */
1696 if (dpl > cpl)
1697 goto exception;
1698 } else {
1699 /* nonconforming */
1700 if (rpl > cpl || dpl != cpl)
1701 goto exception;
1702 }
1703 /* in long-mode d/b must be clear if l is set */
1704 if (seg_desc.d && seg_desc.l) {
1705 u64 efer = 0;
1706
1707 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1708 if (efer & EFER_LMA)
1709 goto exception;
1710 }
1711
1712 /* CS(RPL) <- CPL */
1713 selector = (selector & 0xfffc) | cpl;
1714 break;
1715 case VCPU_SREG_TR:
1716 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1717 goto exception;
1718 old_desc = seg_desc;
1719 seg_desc.type |= 2; /* busy */
1720 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1721 sizeof(seg_desc), &ctxt->exception);
1722 if (ret != X86EMUL_CONTINUE)
1723 return ret;
1724 break;
1725 case VCPU_SREG_LDTR:
1726 if (seg_desc.s || seg_desc.type != 2)
1727 goto exception;
1728 break;
1729 default: /* DS, ES, FS, or GS */
1730 /*
1731 * segment is not a data or readable code segment or
1732 * ((segment is a data or nonconforming code segment)
1733 * and (both RPL and CPL > DPL))
1734 */
1735 if ((seg_desc.type & 0xa) == 0x8 ||
1736 (((seg_desc.type & 0xc) != 0xc) &&
1737 (rpl > dpl && cpl > dpl)))
1738 goto exception;
1739 break;
1740 }
1741
1742 if (seg_desc.s) {
1743 /* mark segment as accessed */
1744 if (!(seg_desc.type & 1)) {
1745 seg_desc.type |= 1;
1746 ret = write_segment_descriptor(ctxt, selector,
1747 &seg_desc);
1748 if (ret != X86EMUL_CONTINUE)
1749 return ret;
1750 }
1751 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1752 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1753 sizeof(base3), &ctxt->exception);
1754 if (ret != X86EMUL_CONTINUE)
1755 return ret;
1756 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1757 ((u64)base3 << 32), ctxt))
1758 return emulate_gp(ctxt, 0);
1759 }
1760 load:
1761 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1762 if (desc)
1763 *desc = seg_desc;
1764 return X86EMUL_CONTINUE;
1765 exception:
1766 return emulate_exception(ctxt, err_vec, err_code, true);
1767 }
1768
1769 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1770 u16 selector, int seg)
1771 {
1772 u8 cpl = ctxt->ops->cpl(ctxt);
1773
1774 /*
1775 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1776 * they can load it at CPL<3 (Intel's manual says only LSS can,
1777 * but it's wrong).
1778 *
1779 * However, the Intel manual says that putting IST=1/DPL=3 in
1780 * an interrupt gate will result in SS=3 (the AMD manual instead
1781 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1782 * and only forbid it here.
1783 */
1784 if (seg == VCPU_SREG_SS && selector == 3 &&
1785 ctxt->mode == X86EMUL_MODE_PROT64)
1786 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1787
1788 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1789 X86_TRANSFER_NONE, NULL);
1790 }
1791
1792 static void write_register_operand(struct operand *op)
1793 {
1794 return assign_register(op->addr.reg, op->val, op->bytes);
1795 }
1796
1797 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1798 {
1799 switch (op->type) {
1800 case OP_REG:
1801 write_register_operand(op);
1802 break;
1803 case OP_MEM:
1804 if (ctxt->lock_prefix)
1805 return segmented_cmpxchg(ctxt,
1806 op->addr.mem,
1807 &op->orig_val,
1808 &op->val,
1809 op->bytes);
1810 else
1811 return segmented_write(ctxt,
1812 op->addr.mem,
1813 &op->val,
1814 op->bytes);
1815 break;
1816 case OP_MEM_STR:
1817 return segmented_write(ctxt,
1818 op->addr.mem,
1819 op->data,
1820 op->bytes * op->count);
1821 break;
1822 case OP_XMM:
1823 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1824 break;
1825 case OP_MM:
1826 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1827 break;
1828 case OP_NONE:
1829 /* no writeback */
1830 break;
1831 default:
1832 break;
1833 }
1834 return X86EMUL_CONTINUE;
1835 }
1836
1837 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1838 {
1839 struct segmented_address addr;
1840
1841 rsp_increment(ctxt, -bytes);
1842 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1843 addr.seg = VCPU_SREG_SS;
1844
1845 return segmented_write(ctxt, addr, data, bytes);
1846 }
1847
1848 static int em_push(struct x86_emulate_ctxt *ctxt)
1849 {
1850 /* Disable writeback. */
1851 ctxt->dst.type = OP_NONE;
1852 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1853 }
1854
1855 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1856 void *dest, int len)
1857 {
1858 int rc;
1859 struct segmented_address addr;
1860
1861 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1862 addr.seg = VCPU_SREG_SS;
1863 rc = segmented_read(ctxt, addr, dest, len);
1864 if (rc != X86EMUL_CONTINUE)
1865 return rc;
1866
1867 rsp_increment(ctxt, len);
1868 return rc;
1869 }
1870
1871 static int em_pop(struct x86_emulate_ctxt *ctxt)
1872 {
1873 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1874 }
1875
1876 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1877 void *dest, int len)
1878 {
1879 int rc;
1880 unsigned long val, change_mask;
1881 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1882 int cpl = ctxt->ops->cpl(ctxt);
1883
1884 rc = emulate_pop(ctxt, &val, len);
1885 if (rc != X86EMUL_CONTINUE)
1886 return rc;
1887
1888 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1889 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1890 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1891 X86_EFLAGS_AC | X86_EFLAGS_ID;
1892
1893 switch(ctxt->mode) {
1894 case X86EMUL_MODE_PROT64:
1895 case X86EMUL_MODE_PROT32:
1896 case X86EMUL_MODE_PROT16:
1897 if (cpl == 0)
1898 change_mask |= X86_EFLAGS_IOPL;
1899 if (cpl <= iopl)
1900 change_mask |= X86_EFLAGS_IF;
1901 break;
1902 case X86EMUL_MODE_VM86:
1903 if (iopl < 3)
1904 return emulate_gp(ctxt, 0);
1905 change_mask |= X86_EFLAGS_IF;
1906 break;
1907 default: /* real mode */
1908 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1909 break;
1910 }
1911
1912 *(unsigned long *)dest =
1913 (ctxt->eflags & ~change_mask) | (val & change_mask);
1914
1915 return rc;
1916 }
1917
1918 static int em_popf(struct x86_emulate_ctxt *ctxt)
1919 {
1920 ctxt->dst.type = OP_REG;
1921 ctxt->dst.addr.reg = &ctxt->eflags;
1922 ctxt->dst.bytes = ctxt->op_bytes;
1923 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1924 }
1925
1926 static int em_enter(struct x86_emulate_ctxt *ctxt)
1927 {
1928 int rc;
1929 unsigned frame_size = ctxt->src.val;
1930 unsigned nesting_level = ctxt->src2.val & 31;
1931 ulong rbp;
1932
1933 if (nesting_level)
1934 return X86EMUL_UNHANDLEABLE;
1935
1936 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1937 rc = push(ctxt, &rbp, stack_size(ctxt));
1938 if (rc != X86EMUL_CONTINUE)
1939 return rc;
1940 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1941 stack_mask(ctxt));
1942 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1943 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1944 stack_mask(ctxt));
1945 return X86EMUL_CONTINUE;
1946 }
1947
1948 static int em_leave(struct x86_emulate_ctxt *ctxt)
1949 {
1950 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1951 stack_mask(ctxt));
1952 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1953 }
1954
1955 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1956 {
1957 int seg = ctxt->src2.val;
1958
1959 ctxt->src.val = get_segment_selector(ctxt, seg);
1960 if (ctxt->op_bytes == 4) {
1961 rsp_increment(ctxt, -2);
1962 ctxt->op_bytes = 2;
1963 }
1964
1965 return em_push(ctxt);
1966 }
1967
1968 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1969 {
1970 int seg = ctxt->src2.val;
1971 unsigned long selector;
1972 int rc;
1973
1974 rc = emulate_pop(ctxt, &selector, 2);
1975 if (rc != X86EMUL_CONTINUE)
1976 return rc;
1977
1978 if (ctxt->modrm_reg == VCPU_SREG_SS)
1979 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1980 if (ctxt->op_bytes > 2)
1981 rsp_increment(ctxt, ctxt->op_bytes - 2);
1982
1983 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1984 return rc;
1985 }
1986
1987 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1988 {
1989 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1990 int rc = X86EMUL_CONTINUE;
1991 int reg = VCPU_REGS_RAX;
1992
1993 while (reg <= VCPU_REGS_RDI) {
1994 ctxt->src.val = (reg == VCPU_REGS_RSP) ?
1995 old_esp : reg_read(ctxt, reg);
1996
1997 rc = em_push(ctxt);
1998 if (rc != X86EMUL_CONTINUE)
1999 return rc;
2000
2001 ++reg;
2002 }
2003
2004 return rc;
2005 }
2006
2007 static int em_pushf(struct x86_emulate_ctxt *ctxt)
2008 {
2009 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2010 return em_push(ctxt);
2011 }
2012
2013 static int em_popa(struct x86_emulate_ctxt *ctxt)
2014 {
2015 int rc = X86EMUL_CONTINUE;
2016 int reg = VCPU_REGS_RDI;
2017 u32 val;
2018
2019 while (reg >= VCPU_REGS_RAX) {
2020 if (reg == VCPU_REGS_RSP) {
2021 rsp_increment(ctxt, ctxt->op_bytes);
2022 --reg;
2023 }
2024
2025 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2026 if (rc != X86EMUL_CONTINUE)
2027 break;
2028 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2029 --reg;
2030 }
2031 return rc;
2032 }
2033
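/*
* Real-mode interrupt dispatch: push FLAGS, CS and IP, clear IF/TF/AC,
* then fetch the handler from the IVT, where entry "irq" is 4 bytes at
* idt.address + irq * 4 (offset in the low word, segment in the high
* word). E.g. vector 0x10 is read from linear address IDT base + 0x40.
*/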
2034 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2035 {
2036 const struct x86_emulate_ops *ops = ctxt->ops;
2037 int rc;
2038 struct desc_ptr dt;
2039 gva_t cs_addr;
2040 gva_t eip_addr;
2041 u16 cs, eip;
2042
2043 /* TODO: Add limit checks */
2044 ctxt->src.val = ctxt->eflags;
2045 rc = em_push(ctxt);
2046 if (rc != X86EMUL_CONTINUE)
2047 return rc;
2048
2049 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2050
2051 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2052 rc = em_push(ctxt);
2053 if (rc != X86EMUL_CONTINUE)
2054 return rc;
2055
2056 ctxt->src.val = ctxt->_eip;
2057 rc = em_push(ctxt);
2058 if (rc != X86EMUL_CONTINUE)
2059 return rc;
2060
2061 ops->get_idt(ctxt, &dt);
2062
2063 eip_addr = dt.address + (irq << 2);
2064 cs_addr = dt.address + (irq << 2) + 2;
2065
2066 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
2067 if (rc != X86EMUL_CONTINUE)
2068 return rc;
2069
2070 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
2071 if (rc != X86EMUL_CONTINUE)
2072 return rc;
2073
2074 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2075 if (rc != X86EMUL_CONTINUE)
2076 return rc;
2077
2078 ctxt->_eip = eip;
2079
2080 return rc;
2081 }
2082
2083 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2084 {
2085 int rc;
2086
2087 invalidate_registers(ctxt);
2088 rc = __emulate_int_real(ctxt, irq);
2089 if (rc == X86EMUL_CONTINUE)
2090 writeback_registers(ctxt);
2091 return rc;
2092 }
2093
2094 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2095 {
2096 switch(ctxt->mode) {
2097 case X86EMUL_MODE_REAL:
2098 return __emulate_int_real(ctxt, irq);
2099 case X86EMUL_MODE_VM86:
2100 case X86EMUL_MODE_PROT16:
2101 case X86EMUL_MODE_PROT32:
2102 case X86EMUL_MODE_PROT64:
2103 default:
2104 /* Protected mode interrupts are not implemented yet */
2105 return X86EMUL_UNHANDLEABLE;
2106 }
2107 }
2108
2109 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2110 {
2111 int rc = X86EMUL_CONTINUE;
2112 unsigned long temp_eip = 0;
2113 unsigned long temp_eflags = 0;
2114 unsigned long cs = 0;
2115 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2116 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2117 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2118 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2119 X86_EFLAGS_AC | X86_EFLAGS_ID |
2120 X86_EFLAGS_FIXED;
2121 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2122 X86_EFLAGS_VIP;
2123
2124 /* TODO: Add stack limit check */
2125
2126 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2127
2128 if (rc != X86EMUL_CONTINUE)
2129 return rc;
2130
2131 if (temp_eip & ~0xffff)
2132 return emulate_gp(ctxt, 0);
2133
2134 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2135
2136 if (rc != X86EMUL_CONTINUE)
2137 return rc;
2138
2139 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2140
2141 if (rc != X86EMUL_CONTINUE)
2142 return rc;
2143
2144 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2145
2146 if (rc != X86EMUL_CONTINUE)
2147 return rc;
2148
2149 ctxt->_eip = temp_eip;
2150
2151 if (ctxt->op_bytes == 4)
2152 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2153 else if (ctxt->op_bytes == 2) {
2154 ctxt->eflags &= ~0xffff;
2155 ctxt->eflags |= temp_eflags;
2156 }
2157
2158 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2159 ctxt->eflags |= X86_EFLAGS_FIXED;
2160 ctxt->ops->set_nmi_mask(ctxt, false);
2161
2162 return rc;
2163 }
2164
2165 static int em_iret(struct x86_emulate_ctxt *ctxt)
2166 {
2167 switch(ctxt->mode) {
2168 case X86EMUL_MODE_REAL:
2169 return emulate_iret_real(ctxt);
2170 case X86EMUL_MODE_VM86:
2171 case X86EMUL_MODE_PROT16:
2172 case X86EMUL_MODE_PROT32:
2173 case X86EMUL_MODE_PROT64:
2174 default:
2175 /* iret from protected mode is not implemented yet */
2176 return X86EMUL_UNHANDLEABLE;
2177 }
2178 }
2179
2180 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2181 {
2182 int rc;
2183 unsigned short sel;
2184 struct desc_struct new_desc;
2185 u8 cpl = ctxt->ops->cpl(ctxt);
2186
2187 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2188
2189 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2190 X86_TRANSFER_CALL_JMP,
2191 &new_desc);
2192 if (rc != X86EMUL_CONTINUE)
2193 return rc;
2194
2195 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2196 /* Error handling is not implemented. */
2197 if (rc != X86EMUL_CONTINUE)
2198 return X86EMUL_UNHANDLEABLE;
2199
2200 return rc;
2201 }
2202
2203 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2204 {
2205 return assign_eip_near(ctxt, ctxt->src.val);
2206 }
2207
2208 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2209 {
2210 int rc;
2211 long int old_eip;
2212
2213 old_eip = ctxt->_eip;
2214 rc = assign_eip_near(ctxt, ctxt->src.val);
2215 if (rc != X86EMUL_CONTINUE)
2216 return rc;
2217 ctxt->src.val = old_eip;
2218 rc = em_push(ctxt);
2219 return rc;
2220 }
2221
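/*
* CMPXCHG8B compares EDX:EAX with the 64-bit destination: on a match it
* sets ZF and stores ECX:EBX, otherwise it clears ZF and loads the old
* memory value into EDX:EAX. The bytes == 16 check below rejects
* CMPXCHG16B, which this emulator does not handle.
*/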
2222 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2223 {
2224 u64 old = ctxt->dst.orig_val64;
2225
2226 if (ctxt->dst.bytes == 16)
2227 return X86EMUL_UNHANDLEABLE;
2228
2229 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2230 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2231 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2232 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2233 ctxt->eflags &= ~X86_EFLAGS_ZF;
2234 } else {
2235 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2236 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2237
2238 ctxt->eflags |= X86_EFLAGS_ZF;
2239 }
2240 return X86EMUL_CONTINUE;
2241 }
2242
2243 static int em_ret(struct x86_emulate_ctxt *ctxt)
2244 {
2245 int rc;
2246 unsigned long eip;
2247
2248 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2249 if (rc != X86EMUL_CONTINUE)
2250 return rc;
2251
2252 return assign_eip_near(ctxt, eip);
2253 }
2254
2255 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2256 {
2257 int rc;
2258 unsigned long eip, cs;
2259 int cpl = ctxt->ops->cpl(ctxt);
2260 struct desc_struct new_desc;
2261
2262 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2263 if (rc != X86EMUL_CONTINUE)
2264 return rc;
2265 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2266 if (rc != X86EMUL_CONTINUE)
2267 return rc;
2268 /* Outer-privilege level return is not implemented */
2269 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2270 return X86EMUL_UNHANDLEABLE;
2271 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2272 X86_TRANSFER_RET,
2273 &new_desc);
2274 if (rc != X86EMUL_CONTINUE)
2275 return rc;
2276 rc = assign_eip_far(ctxt, eip, &new_desc);
2277 /* Error handling is not implemented. */
2278 if (rc != X86EMUL_CONTINUE)
2279 return X86EMUL_UNHANDLEABLE;
2280
2281 return rc;
2282 }
2283
2284 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2285 {
2286 int rc;
2287
2288 rc = em_ret_far(ctxt);
2289 if (rc != X86EMUL_CONTINUE)
2290 return rc;
2291 rsp_increment(ctxt, ctxt->src.val);
2292 return X86EMUL_CONTINUE;
2293 }
2294
2295 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2296 {
2297 /* Save real source value, then compare EAX against destination. */
2298 ctxt->dst.orig_val = ctxt->dst.val;
2299 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2300 ctxt->src.orig_val = ctxt->src.val;
2301 ctxt->src.val = ctxt->dst.orig_val;
2302 fastop(ctxt, em_cmp);
2303
2304 if (ctxt->eflags & X86_EFLAGS_ZF) {
2305 /* Success: write back to memory; no update of EAX */
2306 ctxt->src.type = OP_NONE;
2307 ctxt->dst.val = ctxt->src.orig_val;
2308 } else {
2309 /* Failure: write the value we saw to EAX. */
2310 ctxt->src.type = OP_REG;
2311 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2312 ctxt->src.val = ctxt->dst.orig_val;
2313 /* Create write-cycle to dest by writing the same value */
2314 ctxt->dst.val = ctxt->dst.orig_val;
2315 }
2316 return X86EMUL_CONTINUE;
2317 }
2318
2319 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2320 {
2321 int seg = ctxt->src2.val;
2322 unsigned short sel;
2323 int rc;
2324
2325 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2326
2327 rc = load_segment_descriptor(ctxt, sel, seg);
2328 if (rc != X86EMUL_CONTINUE)
2329 return rc;
2330
2331 ctxt->dst.val = ctxt->src.val;
2332 return rc;
2333 }
2334
2335 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2336 {
2337 u32 eax, ebx, ecx, edx;
2338
2339 eax = 0x80000001;
2340 ecx = 0;
2341 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2342 return edx & bit(X86_FEATURE_LM);
2343 }
2344
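/*
* GET_SMSTATE is a statement expression that reads a value of the given
* type from the SMM state-save area at smbase + offset. Note the hidden
* control flow: a failed read executes "return X86EMUL_UNHANDLEABLE"
* from the *calling* function, so the macro may only be used inside
* functions returning an emulator status code.
*/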
2345 #define GET_SMSTATE(type, smbase, offset) \
2346 ({ \
2347 type __val; \
2348 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
2349 sizeof(__val)); \
2350 if (r != X86EMUL_CONTINUE) \
2351 return X86EMUL_UNHANDLEABLE; \
2352 __val; \
2353 })
2354
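/*
* The flags argument follows the layout of bits 8-23 of a segment
* descriptor's second dword: type in bits 8-11, S in 12, DPL in 13-14,
* P in 15, and AVL/L/D/G in bits 20-23, matching the shifts below.
*/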
2355 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2356 {
2357 desc->g = (flags >> 23) & 1;
2358 desc->d = (flags >> 22) & 1;
2359 desc->l = (flags >> 21) & 1;
2360 desc->avl = (flags >> 20) & 1;
2361 desc->p = (flags >> 15) & 1;
2362 desc->dpl = (flags >> 13) & 3;
2363 desc->s = (flags >> 12) & 1;
2364 desc->type = (flags >> 8) & 15;
2365 }
2366
2367 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2368 {
2369 struct desc_struct desc;
2370 int offset;
2371 u16 selector;
2372
2373 selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2374
2375 if (n < 3)
2376 offset = 0x7f84 + n * 12;
2377 else
2378 offset = 0x7f2c + (n - 3) * 12;
2379
2380 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2381 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2382 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2383 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2384 return X86EMUL_CONTINUE;
2385 }
2386
2387 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2388 {
2389 struct desc_struct desc;
2390 int offset;
2391 u16 selector;
2392 u32 base3;
2393
2394 offset = 0x7e00 + n * 16;
2395
2396 selector = GET_SMSTATE(u16, smbase, offset);
2397 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2398 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2399 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2400 base3 = GET_SMSTATE(u32, smbase, offset + 12);
2401
2402 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2403 return X86EMUL_CONTINUE;
2404 }
2405
2406 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2407 u64 cr0, u64 cr4)
2408 {
2409 int bad;
2410
2411 /*
2412 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2413 * Then enable protected mode. However, PCID cannot be enabled
2414 * if EFER.LMA=0, so set it separately.
2415 */
2416 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2417 if (bad)
2418 return X86EMUL_UNHANDLEABLE;
2419
2420 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2421 if (bad)
2422 return X86EMUL_UNHANDLEABLE;
2423
2424 if (cr4 & X86_CR4_PCIDE) {
2425 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2426 if (bad)
2427 return X86EMUL_UNHANDLEABLE;
2428 }
2429
2430 return X86EMUL_CONTINUE;
2431 }
2432
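/*
* The hard-coded offsets below (0x7ffc for CR0, 0x7ff8 for CR3, ...)
* index into the 32-bit SMM state-save map (the caller passes
* smbase + 0x8000), matching the layout KVM uses when it saves state
* on SMI entry.
*/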
2433 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2434 {
2435 struct desc_struct desc;
2436 struct desc_ptr dt;
2437 u16 selector;
2438 u32 val, cr0, cr4;
2439 int i;
2440
2441 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2442 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
2443 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2444 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2445
2446 for (i = 0; i < 8; i++)
2447 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2448
2449 val = GET_SMSTATE(u32, smbase, 0x7fcc);
2450 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2451 val = GET_SMSTATE(u32, smbase, 0x7fc8);
2452 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2453
2454 selector = GET_SMSTATE(u32, smbase, 0x7fc4);
2455 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
2456 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
2457 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
2458 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2459
2460 selector = GET_SMSTATE(u32, smbase, 0x7fc0);
2461 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
2462 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
2463 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
2464 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2465
2466 dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
2467 dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
2468 ctxt->ops->set_gdt(ctxt, &dt);
2469
2470 dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
2471 dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
2472 ctxt->ops->set_idt(ctxt, &dt);
2473
2474 for (i = 0; i < 6; i++) {
2475 int r = rsm_load_seg_32(ctxt, smbase, i);
2476 if (r != X86EMUL_CONTINUE)
2477 return r;
2478 }
2479
2480 cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2481
2482 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2483
2484 return rsm_enter_protected_mode(ctxt, cr0, cr4);
2485 }
2486
2487 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2488 {
2489 struct desc_struct desc;
2490 struct desc_ptr dt;
2491 u64 val, cr0, cr4;
2492 u32 base3;
2493 u16 selector;
2494 int i, r;
2495
2496 for (i = 0; i < 16; i++)
2497 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2498
2499 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2500 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2501
2502 val = GET_SMSTATE(u32, smbase, 0x7f68);
2503 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2504 val = GET_SMSTATE(u32, smbase, 0x7f60);
2505 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2506
2507 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2508 ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
2509 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2510 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2511 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2512 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2513
2514 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2515 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2516 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2517 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2518 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2519 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2520
2521 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2522 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2523 ctxt->ops->set_idt(ctxt, &dt);
2524
2525 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2526 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2527 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2528 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2529 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2530 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2531
2532 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2533 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2534 ctxt->ops->set_gdt(ctxt, &dt);
2535
2536 r = rsm_enter_protected_mode(ctxt, cr0, cr4);
2537 if (r != X86EMUL_CONTINUE)
2538 return r;
2539
2540 for (i = 0; i < 6; i++) {
2541 r = rsm_load_seg_64(ctxt, smbase, i);
2542 if (r != X86EMUL_CONTINUE)
2543 return r;
2544 }
2545
2546 return X86EMUL_CONTINUE;
2547 }
2548
2549 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2550 {
2551 unsigned long cr0, cr4, efer;
2552 u64 smbase;
2553 int ret;
2554
2555 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2556 return emulate_ud(ctxt);
2557
2558 /*
2559 * Get back to real mode, to prepare a safe state in which to load
2560 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2561 * supports long mode.
2562 */
2563 cr4 = ctxt->ops->get_cr(ctxt, 4);
2564 if (emulator_has_longmode(ctxt)) {
2565 struct desc_struct cs_desc;
2566
2567 /* Zero CR4.PCIDE before CR0.PG. */
2568 if (cr4 & X86_CR4_PCIDE) {
2569 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2570 cr4 &= ~X86_CR4_PCIDE;
2571 }
2572
2573 /* A 32-bit code segment is required to clear EFER.LMA. */
2574 memset(&cs_desc, 0, sizeof(cs_desc));
2575 cs_desc.type = 0xb;
2576 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2577 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2578 }
2579
2580 /* For the 64-bit case, this will clear EFER.LMA. */
2581 cr0 = ctxt->ops->get_cr(ctxt, 0);
2582 if (cr0 & X86_CR0_PE)
2583 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2584
2585 /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
2586 if (cr4 & X86_CR4_PAE)
2587 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2588
2589 /* And finally go back to 32-bit mode. */
2590 efer = 0;
2591 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2592
2593 smbase = ctxt->ops->get_smbase(ctxt);
2594 if (emulator_has_longmode(ctxt))
2595 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2596 else
2597 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2598
2599 if (ret != X86EMUL_CONTINUE) {
2600 /* FIXME: should triple fault */
2601 return X86EMUL_UNHANDLEABLE;
2602 }
2603
2604 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2605 ctxt->ops->set_nmi_mask(ctxt, false);
2606
2607 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2608 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2609 return X86EMUL_CONTINUE;
2610 }
2611
2612 static void
2613 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2614 struct desc_struct *cs, struct desc_struct *ss)
2615 {
2616 cs->l = 0; /* will be adjusted later */
2617 set_desc_base(cs, 0); /* flat segment */
2618 cs->g = 1; /* 4kb granularity */
2619 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2620 cs->type = 0x0b; /* Read, Execute, Accessed */
2621 cs->s = 1;
2622 cs->dpl = 0; /* will be adjusted later */
2623 cs->p = 1;
2624 cs->d = 1;
2625 cs->avl = 0;
2626
2627 set_desc_base(ss, 0); /* flat segment */
2628 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2629 ss->g = 1; /* 4kb granularity */
2630 ss->s = 1;
2631 ss->type = 0x03; /* Read/Write, Accessed */
2632 ss->d = 1; /* 32bit stack segment */
2633 ss->dpl = 0;
2634 ss->p = 1;
2635 ss->l = 0;
2636 ss->avl = 0;
2637 }
2638
2639 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2640 {
2641 u32 eax, ebx, ecx, edx;
2642
2643 eax = ecx = 0;
2644 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2645 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2646 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2647 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2648 }
2649
2650 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2651 {
2652 const struct x86_emulate_ops *ops = ctxt->ops;
2653 u32 eax, ebx, ecx, edx;
2654
2655 /*
2656 * syscall is always enabled in long mode - so the check only
2657 * becomes vendor specific (via cpuid) if other modes are active...
2658 */
2659 if (ctxt->mode == X86EMUL_MODE_PROT64)
2660 return true;
2661
2662 eax = 0x00000000;
2663 ecx = 0x00000000;
2664 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2665 /*
2666 * Intel ("GenuineIntel")
2667 * remark: Intel CPUs only support "syscall" in 64-bit long
2668 * mode. A 64-bit guest running a 32-bit compat application
2669 * will therefore #UD! While this behaviour could be fixed
2670 * (by emulating the AMD response), AMD CPUs can't be made
2671 * to behave like Intel ones.
2672 */
2673 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2674 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2675 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2676 return false;
2677
2678 /* AMD ("AuthenticAMD") */
2679 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2680 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2681 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2682 return true;
2683
2684 /* AMD ("AMDisbetter!") */
2685 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2686 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2687 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2688 return true;
2689
2690 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2691 return false;
2692 }
2693
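/*
* SYSCALL pulls its target selectors out of MSR_STAR: bits 47:32 hold
* the SYSCALL CS base, with SS implicitly CS + 8. E.g. a STAR value of
* 0x0023001000000000 yields cs_sel = 0x0010 and ss_sel = 0x0018 after
* the shift and masking below.
*/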
2694 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2695 {
2696 const struct x86_emulate_ops *ops = ctxt->ops;
2697 struct desc_struct cs, ss;
2698 u64 msr_data;
2699 u16 cs_sel, ss_sel;
2700 u64 efer = 0;
2701
2702 /* syscall is not available in real mode or VM86 mode */
2703 if (ctxt->mode == X86EMUL_MODE_REAL ||
2704 ctxt->mode == X86EMUL_MODE_VM86)
2705 return emulate_ud(ctxt);
2706
2707 if (!(em_syscall_is_enabled(ctxt)))
2708 return emulate_ud(ctxt);
2709
2710 ops->get_msr(ctxt, MSR_EFER, &efer);
2711 setup_syscalls_segments(ctxt, &cs, &ss);
2712
2713 if (!(efer & EFER_SCE))
2714 return emulate_ud(ctxt);
2715
2716 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2717 msr_data >>= 32;
2718 cs_sel = (u16)(msr_data & 0xfffc);
2719 ss_sel = (u16)(msr_data + 8);
2720
2721 if (efer & EFER_LMA) {
2722 cs.d = 0;
2723 cs.l = 1;
2724 }
2725 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2726 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2727
2728 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2729 if (efer & EFER_LMA) {
2730 #ifdef CONFIG_X86_64
2731 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2732
2733 ops->get_msr(ctxt,
2734 ctxt->mode == X86EMUL_MODE_PROT64 ?
2735 MSR_LSTAR : MSR_CSTAR, &msr_data);
2736 ctxt->_eip = msr_data;
2737
2738 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2739 ctxt->eflags &= ~msr_data;
2740 ctxt->eflags |= X86_EFLAGS_FIXED;
2741 #endif
2742 } else {
2743 /* legacy mode */
2744 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2745 ctxt->_eip = (u32)msr_data;
2746
2747 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2748 }
2749
2750 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2751 return X86EMUL_CONTINUE;
2752 }
2753
2754 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2755 {
2756 const struct x86_emulate_ops *ops = ctxt->ops;
2757 struct desc_struct cs, ss;
2758 u64 msr_data;
2759 u16 cs_sel, ss_sel;
2760 u64 efer = 0;
2761
2762 ops->get_msr(ctxt, MSR_EFER, &efer);
2763 /* inject #GP if in real mode */
2764 if (ctxt->mode == X86EMUL_MODE_REAL)
2765 return emulate_gp(ctxt, 0);
2766
2767 /*
2768 * Not recognized on AMD in compat mode (but is recognized in legacy
2769 * mode).
2770 */
2771 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2772 && !vendor_intel(ctxt))
2773 return emulate_ud(ctxt);
2774
2775 /* sysenter/sysexit have not been tested in 64bit mode. */
2776 if (ctxt->mode == X86EMUL_MODE_PROT64)
2777 return X86EMUL_UNHANDLEABLE;
2778
2779 setup_syscalls_segments(ctxt, &cs, &ss);
2780
2781 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2782 if ((msr_data & 0xfffc) == 0x0)
2783 return emulate_gp(ctxt, 0);
2784
2785 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2786 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2787 ss_sel = cs_sel + 8;
2788 if (efer & EFER_LMA) {
2789 cs.d = 0;
2790 cs.l = 1;
2791 }
2792
2793 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2794 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2795
2796 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2797 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2798
2799 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2800 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2801 (u32)msr_data;
2802
2803 return X86EMUL_CONTINUE;
2804 }
2805
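/*
* SYSEXIT derives everything from MSR_IA32_SYSENTER_CS: the 32-bit
* return path uses CS = msr + 16 and SS = msr + 24, while the 64-bit
* path (selected by a REX.W prefix) uses CS = msr + 32 and SS = CS + 8,
* with both selectors forced to RPL 3 before being loaded.
*/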
2806 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2807 {
2808 const struct x86_emulate_ops *ops = ctxt->ops;
2809 struct desc_struct cs, ss;
2810 u64 msr_data, rcx, rdx;
2811 int usermode;
2812 u16 cs_sel = 0, ss_sel = 0;
2813
2814 /* inject #GP if in real mode or Virtual 8086 mode */
2815 if (ctxt->mode == X86EMUL_MODE_REAL ||
2816 ctxt->mode == X86EMUL_MODE_VM86)
2817 return emulate_gp(ctxt, 0);
2818
2819 setup_syscalls_segments(ctxt, &cs, &ss);
2820
2821 if ((ctxt->rex_prefix & 0x8) != 0x0)
2822 usermode = X86EMUL_MODE_PROT64;
2823 else
2824 usermode = X86EMUL_MODE_PROT32;
2825
2826 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2827 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2828
2829 cs.dpl = 3;
2830 ss.dpl = 3;
2831 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2832 switch (usermode) {
2833 case X86EMUL_MODE_PROT32:
2834 cs_sel = (u16)(msr_data + 16);
2835 if ((msr_data & 0xfffc) == 0x0)
2836 return emulate_gp(ctxt, 0);
2837 ss_sel = (u16)(msr_data + 24);
2838 rcx = (u32)rcx;
2839 rdx = (u32)rdx;
2840 break;
2841 case X86EMUL_MODE_PROT64:
2842 cs_sel = (u16)(msr_data + 32);
2843 if (msr_data == 0x0)
2844 return emulate_gp(ctxt, 0);
2845 ss_sel = cs_sel + 8;
2846 cs.d = 0;
2847 cs.l = 1;
2848 if (emul_is_noncanonical_address(rcx, ctxt) ||
2849 emul_is_noncanonical_address(rdx, ctxt))
2850 return emulate_gp(ctxt, 0);
2851 break;
2852 }
2853 cs_sel |= SEGMENT_RPL_MASK;
2854 ss_sel |= SEGMENT_RPL_MASK;
2855
2856 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2857 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2858
2859 ctxt->_eip = rdx;
2860 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2861
2862 return X86EMUL_CONTINUE;
2863 }
2864
2865 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2866 {
2867 int iopl;
2868 if (ctxt->mode == X86EMUL_MODE_REAL)
2869 return false;
2870 if (ctxt->mode == X86EMUL_MODE_VM86)
2871 return true;
2872 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2873 return ctxt->ops->cpl(ctxt) > iopl;
2874 }
2875
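/*
* The check below walks the I/O permission bitmap in the TSS: the
* bitmap offset is read from the TSS at byte 102, then the bits for the
* accessed ports are fetched. E.g. for a 2-byte access to port 0x3f9,
* port/8 selects bitmap byte 0x7f and (perm >> (port & 7)) & 3 must be
* zero - any set bit denies the access.
*/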
2876 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2877 u16 port, u16 len)
2878 {
2879 const struct x86_emulate_ops *ops = ctxt->ops;
2880 struct desc_struct tr_seg;
2881 u32 base3;
2882 int r;
2883 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2884 unsigned mask = (1 << len) - 1;
2885 unsigned long base;
2886
2887 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2888 if (!tr_seg.p)
2889 return false;
2890 if (desc_limit_scaled(&tr_seg) < 103)
2891 return false;
2892 base = get_desc_base(&tr_seg);
2893 #ifdef CONFIG_X86_64
2894 base |= ((u64)base3) << 32;
2895 #endif
2896 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2897 if (r != X86EMUL_CONTINUE)
2898 return false;
2899 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2900 return false;
2901 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2902 if (r != X86EMUL_CONTINUE)
2903 return false;
2904 if ((perm >> bit_idx) & mask)
2905 return false;
2906 return true;
2907 }
2908
2909 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2910 u16 port, u16 len)
2911 {
2912 if (ctxt->perm_ok)
2913 return true;
2914
2915 if (emulator_bad_iopl(ctxt))
2916 if (!emulator_io_port_access_allowed(ctxt, port, len))
2917 return false;
2918
2919 ctxt->perm_ok = true;
2920
2921 return true;
2922 }
2923
2924 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2925 {
2926 /*
2927 * Intel CPUs mask the counter and pointers in a rather strange
2928 * manner when ECX is zero, due to REP-string optimizations.
2929 */
2930 #ifdef CONFIG_X86_64
2931 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2932 return;
2933
2934 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2935
2936 switch (ctxt->b) {
2937 case 0xa4: /* movsb */
2938 case 0xa5: /* movsd/w */
2939 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2940 /* fall through */
2941 case 0xaa: /* stosb */
2942 case 0xab: /* stosd/w */
2943 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2944 }
2945 #endif
2946 }
2947
2948 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2949 struct tss_segment_16 *tss)
2950 {
2951 tss->ip = ctxt->_eip;
2952 tss->flag = ctxt->eflags;
2953 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2954 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2955 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2956 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2957 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2958 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2959 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2960 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2961
2962 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2963 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2964 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2965 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2966 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2967 }
2968
2969 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2970 struct tss_segment_16 *tss)
2971 {
2972 int ret;
2973 u8 cpl;
2974
2975 ctxt->_eip = tss->ip;
2976 ctxt->eflags = tss->flag | 2;
2977 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2978 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2979 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2980 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2981 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2982 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2983 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2984 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2985
2986 /*
2987 * SDM says that segment selectors are loaded before segment
2988 * descriptors
2989 */
2990 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2991 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2992 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2993 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2994 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2995
2996 cpl = tss->cs & 3;
2997
2998 /*
2999 * Now load the segment descriptors. If a fault happens at this
3000 * stage, it is handled in the context of the new task.
3001 */
3002 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3003 X86_TRANSFER_TASK_SWITCH, NULL);
3004 if (ret != X86EMUL_CONTINUE)
3005 return ret;
3006 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3007 X86_TRANSFER_TASK_SWITCH, NULL);
3008 if (ret != X86EMUL_CONTINUE)
3009 return ret;
3010 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3011 X86_TRANSFER_TASK_SWITCH, NULL);
3012 if (ret != X86EMUL_CONTINUE)
3013 return ret;
3014 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3015 X86_TRANSFER_TASK_SWITCH, NULL);
3016 if (ret != X86EMUL_CONTINUE)
3017 return ret;
3018 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3019 X86_TRANSFER_TASK_SWITCH, NULL);
3020 if (ret != X86EMUL_CONTINUE)
3021 return ret;
3022
3023 return X86EMUL_CONTINUE;
3024 }
3025
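/*
* A 16-bit task switch in four steps: read the outgoing TSS, update it
* with the current state and write it back, read the incoming TSS,
* optionally store the old selector in its prev_task_link field (so
* iret can unwind via NT), and finally load CPU state from the new TSS.
*/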
3026 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3027 u16 tss_selector, u16 old_tss_sel,
3028 ulong old_tss_base, struct desc_struct *new_desc)
3029 {
3030 const struct x86_emulate_ops *ops = ctxt->ops;
3031 struct tss_segment_16 tss_seg;
3032 int ret;
3033 u32 new_tss_base = get_desc_base(new_desc);
3034
3035 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3036 &ctxt->exception);
3037 if (ret != X86EMUL_CONTINUE)
3038 return ret;
3039
3040 save_state_to_tss16(ctxt, &tss_seg);
3041
3042 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3043 &ctxt->exception);
3044 if (ret != X86EMUL_CONTINUE)
3045 return ret;
3046
3047 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3048 &ctxt->exception);
3049 if (ret != X86EMUL_CONTINUE)
3050 return ret;
3051
3052 if (old_tss_sel != 0xffff) {
3053 tss_seg.prev_task_link = old_tss_sel;
3054
3055 ret = ops->write_std(ctxt, new_tss_base,
3056 &tss_seg.prev_task_link,
3057 sizeof tss_seg.prev_task_link,
3058 &ctxt->exception);
3059 if (ret != X86EMUL_CONTINUE)
3060 return ret;
3061 }
3062
3063 return load_state_from_tss16(ctxt, &tss_seg);
3064 }
3065
3066 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3067 struct tss_segment_32 *tss)
3068 {
3069 /* CR3 and the LDT selector are intentionally not saved */
3070 tss->eip = ctxt->_eip;
3071 tss->eflags = ctxt->eflags;
3072 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3073 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3074 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3075 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3076 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3077 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3078 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3079 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3080
3081 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3082 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3083 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3084 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3085 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3086 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3087 }
3088
3089 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3090 struct tss_segment_32 *tss)
3091 {
3092 int ret;
3093 u8 cpl;
3094
3095 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3096 return emulate_gp(ctxt, 0);
3097 ctxt->_eip = tss->eip;
3098 ctxt->eflags = tss->eflags | 2;
3099
3100 /* General purpose registers */
3101 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3102 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3103 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3104 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3105 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3106 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3107 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3108 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3109
3110 /*
3111 * SDM says that segment selectors are loaded before segment
3112 * descriptors. This is important because CPL checks will
3113 * use CS.RPL.
3114 */
3115 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3116 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3117 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3118 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3119 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3120 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3121 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3122
3123 /*
3124 * If we're switching between Protected Mode and VM86, we need to make
3125 * sure to update the mode before loading the segment descriptors so
3126 * that the selectors are interpreted correctly.
3127 */
3128 if (ctxt->eflags & X86_EFLAGS_VM) {
3129 ctxt->mode = X86EMUL_MODE_VM86;
3130 cpl = 3;
3131 } else {
3132 ctxt->mode = X86EMUL_MODE_PROT32;
3133 cpl = tss->cs & 3;
3134 }
3135
3136 /*
3137 * Now load the segment descriptors. If a fault happens at this
3138 * stage, it is handled in the context of the new task.
3139 */
3140 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3141 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3142 if (ret != X86EMUL_CONTINUE)
3143 return ret;
3144 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3145 X86_TRANSFER_TASK_SWITCH, NULL);
3146 if (ret != X86EMUL_CONTINUE)
3147 return ret;
3148 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3149 X86_TRANSFER_TASK_SWITCH, NULL);
3150 if (ret != X86EMUL_CONTINUE)
3151 return ret;
3152 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3153 X86_TRANSFER_TASK_SWITCH, NULL);
3154 if (ret != X86EMUL_CONTINUE)
3155 return ret;
3156 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3157 X86_TRANSFER_TASK_SWITCH, NULL);
3158 if (ret != X86EMUL_CONTINUE)
3159 return ret;
3160 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3161 X86_TRANSFER_TASK_SWITCH, NULL);
3162 if (ret != X86EMUL_CONTINUE)
3163 return ret;
3164 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3165 X86_TRANSFER_TASK_SWITCH, NULL);
3166
3167 return ret;
3168 }
3169
3170 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3171 u16 tss_selector, u16 old_tss_sel,
3172 ulong old_tss_base, struct desc_struct *new_desc)
3173 {
3174 const struct x86_emulate_ops *ops = ctxt->ops;
3175 struct tss_segment_32 tss_seg;
3176 int ret;
3177 u32 new_tss_base = get_desc_base(new_desc);
3178 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3179 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3180
3181 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3182 &ctxt->exception);
3183 if (ret != X86EMUL_CONTINUE)
3184 return ret;
3185
3186 save_state_to_tss32(ctxt, &tss_seg);
3187
3188 /* Only GP registers and segment selectors are saved */
3189 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3190 ldt_sel_offset - eip_offset, &ctxt->exception);
3191 if (ret != X86EMUL_CONTINUE)
3192 return ret;
3193
3194 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3195 &ctxt->exception);
3196 if (ret != X86EMUL_CONTINUE)
3197 return ret;
3198
3199 if (old_tss_sel != 0xffff) {
3200 tss_seg.prev_task_link = old_tss_sel;
3201
3202 ret = ops->write_std(ctxt, new_tss_base,
3203 &tss_seg.prev_task_link,
3204 sizeof tss_seg.prev_task_link,
3205 &ctxt->exception);
3206 if (ret != X86EMUL_CONTINUE)
3207 return ret;
3208 }
3209
3210 return load_state_from_tss32(ctxt, &tss_seg);
3211 }
3212
3213 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3214 u16 tss_selector, int idt_index, int reason,
3215 bool has_error_code, u32 error_code)
3216 {
3217 const struct x86_emulate_ops *ops = ctxt->ops;
3218 struct desc_struct curr_tss_desc, next_tss_desc;
3219 int ret;
3220 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3221 ulong old_tss_base =
3222 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3223 u32 desc_limit;
3224 ulong desc_addr, dr7;
3225
3226 /* FIXME: old_tss_base == ~0 ? */
3227
3228 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3229 if (ret != X86EMUL_CONTINUE)
3230 return ret;
3231 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3232 if (ret != X86EMUL_CONTINUE)
3233 return ret;
3234
3235 /* FIXME: check that next_tss_desc is tss */
3236
3237 /*
3238 * Check privileges. The three cases are task switch caused by...
3239 *
3240 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3241 * 2. Exception/IRQ/iret: No check is performed
3242 * 3. jmp/call to TSS/task-gate: No check is performed since the
3243 * hardware checks it before exiting.
3244 */
3245 if (reason == TASK_SWITCH_GATE) {
3246 if (idt_index != -1) {
3247 /* Software interrupts */
3248 struct desc_struct task_gate_desc;
3249 int dpl;
3250
3251 ret = read_interrupt_descriptor(ctxt, idt_index,
3252 &task_gate_desc);
3253 if (ret != X86EMUL_CONTINUE)
3254 return ret;
3255
3256 dpl = task_gate_desc.dpl;
3257 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3258 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3259 }
3260 }
3261
3262 desc_limit = desc_limit_scaled(&next_tss_desc);
3263 if (!next_tss_desc.p ||
3264 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3265 desc_limit < 0x2b)) {
3266 return emulate_ts(ctxt, tss_selector & 0xfffc);
3267 }
3268
3269 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3270 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3271 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3272 }
3273
3274 if (reason == TASK_SWITCH_IRET)
3275 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3276
3277 /* Set the back link to the previous task only if the NT bit is set
3278 in eflags; note that old_tss_sel is not used after this point. */
3279 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3280 old_tss_sel = 0xffff;
3281
3282 if (next_tss_desc.type & 8)
3283 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3284 old_tss_base, &next_tss_desc);
3285 else
3286 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3287 old_tss_base, &next_tss_desc);
3288 if (ret != X86EMUL_CONTINUE)
3289 return ret;
3290
3291 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3292 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3293
3294 if (reason != TASK_SWITCH_IRET) {
3295 next_tss_desc.type |= (1 << 1); /* set busy flag */
3296 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3297 }
3298
3299 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3300 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3301
3302 if (has_error_code) {
3303 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3304 ctxt->lock_prefix = 0;
3305 ctxt->src.val = (unsigned long) error_code;
3306 ret = em_push(ctxt);
3307 }
3308
3309 ops->get_dr(ctxt, 7, &dr7);
3310 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3311
3312 return ret;
3313 }
3314
3315 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3316 u16 tss_selector, int idt_index, int reason,
3317 bool has_error_code, u32 error_code)
3318 {
3319 int rc;
3320
3321 invalidate_registers(ctxt);
3322 ctxt->_eip = ctxt->eip;
3323 ctxt->dst.type = OP_NONE;
3324
3325 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3326 has_error_code, error_code);
3327
3328 if (rc == X86EMUL_CONTINUE) {
3329 ctxt->eip = ctxt->_eip;
3330 writeback_registers(ctxt);
3331 }
3332
3333 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3334 }
3335
3336 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3337 struct operand *op)
3338 {
3339 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3340
3341 register_address_increment(ctxt, reg, df * op->bytes);
3342 op->addr.mem.ea = register_address(ctxt, reg);
3343 }
3344
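/*
* DAS (decimal adjust after subtraction) follows the usual two-step
* rule: subtract 6 if the low nibble is > 9 or AF is set, then subtract
* 0x60 if the original AL was > 0x99 or CF was set. Worked example:
* AL = 0x9b gives 0x9b - 6 = 0x95, then 0x95 - 0x60 = 0x35 with CF set.
*/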
3345 static int em_das(struct x86_emulate_ctxt *ctxt)
3346 {
3347 u8 al, old_al;
3348 bool af, cf, old_cf;
3349
3350 cf = ctxt->eflags & X86_EFLAGS_CF;
3351 al = ctxt->dst.val;
3352
3353 old_al = al;
3354 old_cf = cf;
3355 cf = false;
3356 af = ctxt->eflags & X86_EFLAGS_AF;
3357 if ((al & 0x0f) > 9 || af) {
3358 al -= 6;
3359 cf = old_cf | (al >= 250);
3360 af = true;
3361 } else {
3362 af = false;
3363 }
3364 if (old_al > 0x99 || old_cf) {
3365 al -= 0x60;
3366 cf = true;
3367 }
3368
3369 ctxt->dst.val = al;
3370 /* Set PF, ZF, SF */
3371 ctxt->src.type = OP_IMM;
3372 ctxt->src.val = 0;
3373 ctxt->src.bytes = 1;
3374 fastop(ctxt, em_or);
3375 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3376 if (cf)
3377 ctxt->eflags |= X86_EFLAGS_CF;
3378 if (af)
3379 ctxt->eflags |= X86_EFLAGS_AF;
3380 return X86EMUL_CONTINUE;
3381 }
3382
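/*
* AAM splits AL by an arbitrary base (normally 10): AH = AL / imm8 and
* AL = AL % imm8. E.g. AL = 47 with the default base gives AH = 4,
* AL = 7; an imm8 of zero raises #DE, checked below.
*/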
3383 static int em_aam(struct x86_emulate_ctxt *ctxt)
3384 {
3385 u8 al, ah;
3386
3387 if (ctxt->src.val == 0)
3388 return emulate_de(ctxt);
3389
3390 al = ctxt->dst.val & 0xff;
3391 ah = al / ctxt->src.val;
3392 al %= ctxt->src.val;
3393
3394 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3395
3396 /* Set PF, ZF, SF */
3397 ctxt->src.type = OP_IMM;
3398 ctxt->src.val = 0;
3399 ctxt->src.bytes = 1;
3400 fastop(ctxt, em_or);
3401
3402 return X86EMUL_CONTINUE;
3403 }
3404
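/*
* AAD is the inverse of AAM: AL = (AL + AH * imm8) & 0xff with AH
* cleared. E.g. AH = 4, AL = 7 with the default base 10 recombines to
* AL = 47 (0x2f), AH = 0.
*/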
3405 static int em_aad(struct x86_emulate_ctxt *ctxt)
3406 {
3407 u8 al = ctxt->dst.val & 0xff;
3408 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3409
3410 al = (al + (ah * ctxt->src.val)) & 0xff;
3411
3412 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3413
3414 /* Set PF, ZF, SF */
3415 ctxt->src.type = OP_IMM;
3416 ctxt->src.val = 0;
3417 ctxt->src.bytes = 1;
3418 fastop(ctxt, em_or);
3419
3420 return X86EMUL_CONTINUE;
3421 }
3422
3423 static int em_call(struct x86_emulate_ctxt *ctxt)
3424 {
3425 int rc;
3426 long rel = ctxt->src.val;
3427
3428 ctxt->src.val = (unsigned long)ctxt->_eip;
3429 rc = jmp_rel(ctxt, rel);
3430 if (rc != X86EMUL_CONTINUE)
3431 return rc;
3432 return em_push(ctxt);
3433 }
3434
3435 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3436 {
3437 u16 sel, old_cs;
3438 ulong old_eip;
3439 int rc;
3440 struct desc_struct old_desc, new_desc;
3441 const struct x86_emulate_ops *ops = ctxt->ops;
3442 int cpl = ctxt->ops->cpl(ctxt);
3443 enum x86emul_mode prev_mode = ctxt->mode;
3444
3445 old_eip = ctxt->_eip;
3446 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3447
3448 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3449 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3450 X86_TRANSFER_CALL_JMP, &new_desc);
3451 if (rc != X86EMUL_CONTINUE)
3452 return rc;
3453
3454 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3455 if (rc != X86EMUL_CONTINUE)
3456 goto fail;
3457
3458 ctxt->src.val = old_cs;
3459 rc = em_push(ctxt);
3460 if (rc != X86EMUL_CONTINUE)
3461 goto fail;
3462
3463 ctxt->src.val = old_eip;
3464 rc = em_push(ctxt);
3465 /* If we failed, we tainted the memory, but at the very least we
3466 should restore cs */
3467 if (rc != X86EMUL_CONTINUE) {
3468 pr_warn_once("faulting far call emulation tainted memory\n");
3469 goto fail;
3470 }
3471 return rc;
3472 fail:
3473 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3474 ctxt->mode = prev_mode;
3475 return rc;
3476
3477 }
3478
3479 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3480 {
3481 int rc;
3482 unsigned long eip;
3483
3484 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3485 if (rc != X86EMUL_CONTINUE)
3486 return rc;
3487 rc = assign_eip_near(ctxt, eip);
3488 if (rc != X86EMUL_CONTINUE)
3489 return rc;
3490 rsp_increment(ctxt, ctxt->src.val);
3491 return X86EMUL_CONTINUE;
3492 }
3493
3494 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3495 {
3496 /* Write back the register source. */
3497 ctxt->src.val = ctxt->dst.val;
3498 write_register_operand(&ctxt->src);
3499
3500 /* Write back the memory destination with implicit LOCK prefix. */
3501 ctxt->dst.val = ctxt->src.orig_val;
3502 ctxt->lock_prefix = 1;
3503 return X86EMUL_CONTINUE;
3504 }
3505
3506 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3507 {
3508 ctxt->dst.val = ctxt->src2.val;
3509 return fastop(ctxt, em_imul);
3510 }
3511
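/*
* CWD/CDQ/CQO sign-extend the accumulator into rDX. The expression
* below broadcasts the sign bit: shifting it down to bit 0 gives 0 or
* 1, and ~(x - 1) maps that to 0 or all-ones. E.g. AX = 0x8000 yields
* DX = 0xffff; AX = 0x7fff yields DX = 0.
*/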
3512 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3513 {
3514 ctxt->dst.type = OP_REG;
3515 ctxt->dst.bytes = ctxt->src.bytes;
3516 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3517 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3518
3519 return X86EMUL_CONTINUE;
3520 }
3521
3522 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3523 {
3524 u64 tsc = 0;
3525
3526 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3527 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3528 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3529 return X86EMUL_CONTINUE;
3530 }
3531
3532 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3533 {
3534 u64 pmc;
3535
3536 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3537 return emulate_gp(ctxt, 0);
3538 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3539 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3540 return X86EMUL_CONTINUE;
3541 }
3542
3543 static int em_mov(struct x86_emulate_ctxt *ctxt)
3544 {
3545 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3546 return X86EMUL_CONTINUE;
3547 }
3548
3549 #define FFL(x) bit(X86_FEATURE_##x)
3550
3551 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3552 {
3553 u32 ebx, ecx, edx, eax = 1;
3554 u16 tmp;
3555
3556 /*
3557 * Check that MOVBE is set in the guest-visible CPUID leaf.
3558 */
3559 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3560 if (!(ecx & FFL(MOVBE)))
3561 return emulate_ud(ctxt);
3562
3563 switch (ctxt->op_bytes) {
3564 case 2:
3565 /*
3566 * From MOVBE definition: "...When the operand size is 16 bits,
3567 * the upper word of the destination register remains unchanged
3568 * ..."
3569 *
3570 * Casting either ->valptr or ->val to u16 breaks strict-aliasing
3571 * rules, so we have to do the operation almost by hand.
3572 */
3573 tmp = (u16)ctxt->src.val;
3574 ctxt->dst.val &= ~0xffffUL;
3575 ctxt->dst.val |= (unsigned long)swab16(tmp);
3576 break;
3577 case 4:
3578 ctxt->dst.val = swab32((u32)ctxt->src.val);
3579 break;
3580 case 8:
3581 ctxt->dst.val = swab64(ctxt->src.val);
3582 break;
3583 default:
3584 BUG();
3585 }
3586 return X86EMUL_CONTINUE;
3587 }
3588
3589 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3590 {
3591 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3592 return emulate_gp(ctxt, 0);
3593
3594 /* Disable writeback. */
3595 ctxt->dst.type = OP_NONE;
3596 return X86EMUL_CONTINUE;
3597 }
3598
3599 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3600 {
3601 unsigned long val;
3602
3603 if (ctxt->mode == X86EMUL_MODE_PROT64)
3604 val = ctxt->src.val & ~0ULL;
3605 else
3606 val = ctxt->src.val & ~0U;
3607
3608 /* #UD condition is already handled. */
3609 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3610 return emulate_gp(ctxt, 0);
3611
3612 /* Disable writeback. */
3613 ctxt->dst.type = OP_NONE;
3614 return X86EMUL_CONTINUE;
3615 }
3616
3617 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3618 {
3619 u64 msr_data;
3620
3621 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3622 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3623 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3624 return emulate_gp(ctxt, 0);
3625
3626 return X86EMUL_CONTINUE;
3627 }
3628
3629 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3630 {
3631 u64 msr_data;
3632
3633 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3634 return emulate_gp(ctxt, 0);
3635
3636 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3637 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3638 return X86EMUL_CONTINUE;
3639 }
3640
3641 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3642 {
3643 if (ctxt->modrm_reg > VCPU_SREG_GS)
3644 return emulate_ud(ctxt);
3645
3646 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3647 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3648 ctxt->dst.bytes = 2;
3649 return X86EMUL_CONTINUE;
3650 }
3651
3652 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3653 {
3654 u16 sel = ctxt->src.val;
3655
3656 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3657 return emulate_ud(ctxt);
3658
3659 if (ctxt->modrm_reg == VCPU_SREG_SS)
3660 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3661
3662 /* Disable writeback. */
3663 ctxt->dst.type = OP_NONE;
3664 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3665 }
3666
3667 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3668 {
3669 u16 sel = ctxt->src.val;
3670
3671 /* Disable writeback. */
3672 ctxt->dst.type = OP_NONE;
3673 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3674 }
3675
3676 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3677 {
3678 u16 sel = ctxt->src.val;
3679
3680 /* Disable writeback. */
3681 ctxt->dst.type = OP_NONE;
3682 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3683 }
3684
3685 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3686 {
3687 int rc;
3688 ulong linear;
3689
3690 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3691 if (rc == X86EMUL_CONTINUE)
3692 ctxt->ops->invlpg(ctxt, linear);
3693 /* Disable writeback. */
3694 ctxt->dst.type = OP_NONE;
3695 return X86EMUL_CONTINUE;
3696 }
3697
3698 static int em_clts(struct x86_emulate_ctxt *ctxt)
3699 {
3700 ulong cr0;
3701
3702 cr0 = ctxt->ops->get_cr(ctxt, 0);
3703 cr0 &= ~X86_CR0_TS;
3704 ctxt->ops->set_cr(ctxt, 0, cr0);
3705 return X86EMUL_CONTINUE;
3706 }
3707
3708 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3709 {
3710 int rc = ctxt->ops->fix_hypercall(ctxt);
3711
3712 if (rc != X86EMUL_CONTINUE)
3713 return rc;
3714
3715 /* Let the processor re-execute the fixed hypercall */
3716 ctxt->_eip = ctxt->eip;
3717 /* Disable writeback. */
3718 ctxt->dst.type = OP_NONE;
3719 return X86EMUL_CONTINUE;
3720 }
3721
3722 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3723 void (*get)(struct x86_emulate_ctxt *ctxt,
3724 struct desc_ptr *ptr))
3725 {
3726 struct desc_ptr desc_ptr;
3727
3728 if (ctxt->mode == X86EMUL_MODE_PROT64)
3729 ctxt->op_bytes = 8;
3730 get(ctxt, &desc_ptr);
3731 if (ctxt->op_bytes == 2) {
3732 ctxt->op_bytes = 4;
3733 desc_ptr.address &= 0x00ffffff;
3734 }
3735 /* Disable writeback. */
3736 ctxt->dst.type = OP_NONE;
3737 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3738 &desc_ptr, 2 + ctxt->op_bytes);
3739 }
3740
3741 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3742 {
3743 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3744 }
3745
3746 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3747 {
3748 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3749 }
3750
3751 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3752 {
3753 struct desc_ptr desc_ptr;
3754 int rc;
3755
3756 if (ctxt->mode == X86EMUL_MODE_PROT64)
3757 ctxt->op_bytes = 8;
3758 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3759 &desc_ptr.size, &desc_ptr.address,
3760 ctxt->op_bytes);
3761 if (rc != X86EMUL_CONTINUE)
3762 return rc;
3763 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3764 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3765 return emulate_gp(ctxt, 0);
3766 if (lgdt)
3767 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3768 else
3769 ctxt->ops->set_idt(ctxt, &desc_ptr);
3770 /* Disable writeback. */
3771 ctxt->dst.type = OP_NONE;
3772 return X86EMUL_CONTINUE;
3773 }
3774
3775 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3776 {
3777 return em_lgdt_lidt(ctxt, true);
3778 }
3779
3780 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3781 {
3782 return em_lgdt_lidt(ctxt, false);
3783 }
3784
3785 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3786 {
3787 if (ctxt->dst.type == OP_MEM)
3788 ctxt->dst.bytes = 2;
3789 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3790 return X86EMUL_CONTINUE;
3791 }
3792
3793 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3794 {
3795 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3796 | (ctxt->src.val & 0x0f));
3797 ctxt->dst.type = OP_NONE;
3798 return X86EMUL_CONTINUE;
3799 }
3800
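/*
* LOOP-family dispatch: 0xe2 is plain LOOP, while LOOPNE (0xe0) and
* LOOPE (0xe1) reuse the Jcc condition table - XORing the opcode with
* 0x5 turns their low nibbles into condition codes 5 (ZF clear) and
* 4 (ZF set) for test_cc().
*/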
3801 static int em_loop(struct x86_emulate_ctxt *ctxt)
3802 {
3803 int rc = X86EMUL_CONTINUE;
3804
3805 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3806 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3807 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3808 rc = jmp_rel(ctxt, ctxt->src.val);
3809
3810 return rc;
3811 }
3812
3813 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3814 {
3815 int rc = X86EMUL_CONTINUE;
3816
3817 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3818 rc = jmp_rel(ctxt, ctxt->src.val);
3819
3820 return rc;
3821 }
3822
3823 static int em_in(struct x86_emulate_ctxt *ctxt)
3824 {
3825 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3826 &ctxt->dst.val))
3827 return X86EMUL_IO_NEEDED;
3828
3829 return X86EMUL_CONTINUE;
3830 }
3831
3832 static int em_out(struct x86_emulate_ctxt *ctxt)
3833 {
3834 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3835 &ctxt->src.val, 1);
3836 /* Disable writeback. */
3837 ctxt->dst.type = OP_NONE;
3838 return X86EMUL_CONTINUE;
3839 }
3840
3841 static int em_cli(struct x86_emulate_ctxt *ctxt)
3842 {
3843 if (emulator_bad_iopl(ctxt))
3844 return emulate_gp(ctxt, 0);
3845
3846 ctxt->eflags &= ~X86_EFLAGS_IF;
3847 return X86EMUL_CONTINUE;
3848 }
3849
3850 static int em_sti(struct x86_emulate_ctxt *ctxt)
3851 {
3852 if (emulator_bad_iopl(ctxt))
3853 return emulate_gp(ctxt, 0);
3854
3855 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3856 ctxt->eflags |= X86_EFLAGS_IF;
3857 return X86EMUL_CONTINUE;
3858 }
3859
3860 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3861 {
3862 u32 eax, ebx, ecx, edx;
3863 u64 msr = 0;
3864
3865 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3866 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3867 ctxt->ops->cpl(ctxt)) {
3868 return emulate_gp(ctxt, 0);
3869 }
3870
3871 eax = reg_read(ctxt, VCPU_REGS_RAX);
3872 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3873 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3874 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3875 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3876 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3877 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3878 return X86EMUL_CONTINUE;
3879 }
3880
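/*
 * Example of the CPUID-faulting check in em_cpuid() above: once the
 * guest sets MSR_MISC_FEATURES_ENABLES.CPUID_FAULT (bit 0), a CPUID
 * executed at CPL > 0 takes the emulate_gp() path instead of being
 * emulated.
 */
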
3881 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3882 {
3883 u32 flags;
3884
3885 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3886 X86_EFLAGS_SF;
3887 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3888
3889 ctxt->eflags &= ~0xffUL;
3890 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3891 return X86EMUL_CONTINUE;
3892 }
3893
3894 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3895 {
3896 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3897 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3898 return X86EMUL_CONTINUE;
3899 }
3900
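/*
 * Illustrative sketch (not part of the emulator) of the arithmetic in
 * em_sahf() above: the loadable mask SF|ZF|AF|PF|CF is
 * 0x80|0x40|0x10|0x04|0x01 == 0xd5, and X86_EFLAGS_FIXED keeps the
 * always-one bit 1 set; em_lahf() is the inverse, copying EFLAGS[7:0]
 * into AH.
 */
static inline u32 __maybe_unused sahf_eflags_low8(u8 ah)
{
	return (ah & 0xd5) | X86_EFLAGS_FIXED;
}
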
3901 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3902 {
3903 switch (ctxt->op_bytes) {
3904 #ifdef CONFIG_X86_64
3905 case 8:
3906 asm("bswap %0" : "+r"(ctxt->dst.val));
3907 break;
3908 #endif
3909 default:
3910 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3911 break;
3912 }
3913 return X86EMUL_CONTINUE;
3914 }
3915
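/*
 * Example for em_bswap() above: bswap on 0x12345678 yields 0x78563412.
 * A 16-bit operand size falls through to the 32-bit case; the result of
 * a 16-bit BSWAP is architecturally undefined, so any behaviour is fine.
 */
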
3916 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3917 {
3918 /* emulate clflush as a no-op, even when CPUID does not advertise it */
3919 return X86EMUL_CONTINUE;
3920 }
3921
3922 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3923 {
3924 ctxt->dst.val = (s32) ctxt->src.val;
3925 return X86EMUL_CONTINUE;
3926 }
3927
3928 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3929 {
3930 u32 eax = 1, ebx, ecx = 0, edx;
3931
3932 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3933 if (!(edx & FFL(FXSR)))
3934 return emulate_ud(ctxt);
3935
3936 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3937 return emulate_nm(ctxt);
3938
3939 /*
3940 * Don't emulate a case that should never be hit; refusing 64-bit mode
3941 * here avoids working around the lack of fxsave64/fxrstor64 on old compilers.
3942 */
3943 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3944 return X86EMUL_UNHANDLEABLE;
3945
3946 return X86EMUL_CONTINUE;
3947 }
3948
3949 /*
3950 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3951 * and restore MXCSR.
3952 */
3953 static size_t __fxstate_size(int nregs)
3954 {
3955 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3956 }
3957
3958 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3959 {
3960 bool cr4_osfxsr;
3961 if (ctxt->mode == X86EMUL_MODE_PROT64)
3962 return __fxstate_size(16);
3963
3964 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3965 return __fxstate_size(cr4_osfxsr ? 8 : 0);
3966 }
3967
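/*
 * Worked sizes for the helpers above, assuming the standard FXSAVE
 * layout in which xmm_space starts at byte offset 160:
 * __fxstate_size(16) == 160 + 16*16 == 416 (all 16 XMM registers),
 * __fxstate_size(8)  == 288 (CR4.OSFXSR set, XMM 0-7) and
 * __fxstate_size(0)  == 160 (x87 state and MXCSR only).
 */
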
3968 /*
3969 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3970 * 1) 16 bit mode
3971 * 2) 32 bit mode
3972 * - like (1), but FIP and FDP are 32 bit; in 16 bit mode they are
3973 * nominally only 16 bit, but at least Intel CPUs preserve the whole
3974 * 32 bit values, so (1) and (2) are the same wrt. save and restore
3975 * 3) 64-bit mode without REX.W prefix
3976 * - like (2), but XMM 8-15 are saved and restored as well
3977 * 4) 64-bit mode with REX.W prefix (i.e. FXSAVE64/FXRSTOR64)
3978 * - like (3), but FIP and FDP are 64 bit
3979 *
3980 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3981 * desired result. (4) is not emulated.
3982 *
3983 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3984 * and FPU DS) should match.
3985 */
3986 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3987 {
3988 struct fxregs_state fx_state;
3989 int rc;
3990
3991 rc = check_fxsr(ctxt);
3992 if (rc != X86EMUL_CONTINUE)
3993 return rc;
3994
3995 ctxt->ops->get_fpu(ctxt);
3996
3997 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3998
3999 ctxt->ops->put_fpu(ctxt);
4000
4001 if (rc != X86EMUL_CONTINUE)
4002 return rc;
4003
4004 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4005 fxstate_size(ctxt));
4006 }
4007
4008 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4009 {
4010 struct fxregs_state fx_state;
4011 int rc;
4012 size_t size;
4013
4014 rc = check_fxsr(ctxt);
4015 if (rc != X86EMUL_CONTINUE)
4016 return rc;
4017
4018 ctxt->ops->get_fpu(ctxt);
4019
4020 size = fxstate_size(ctxt);
4021 if (size < __fxstate_size(16)) {
4022 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4023 if (rc != X86EMUL_CONTINUE)
4024 goto out;
4025 }
4026
4027 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4028 if (rc != X86EMUL_CONTINUE)
4029 goto out;
4030
4031 if (fx_state.mxcsr >> 16) {
4032 rc = emulate_gp(ctxt, 0);
4033 goto out;
4034 }
4035
4036 if (rc == X86EMUL_CONTINUE)
4037 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4038
4039 out:
4040 ctxt->ops->put_fpu(ctxt);
4041
4042 return rc;
4043 }
4044
4045 static bool valid_cr(int nr)
4046 {
4047 switch (nr) {
4048 case 0:
4049 case 2 ... 4:
4050 case 8:
4051 return true;
4052 default:
4053 return false;
4054 }
4055 }
4056
4057 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4058 {
4059 if (!valid_cr(ctxt->modrm_reg))
4060 return emulate_ud(ctxt);
4061
4062 return X86EMUL_CONTINUE;
4063 }
4064
4065 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4066 {
4067 u64 new_val = ctxt->src.val64;
4068 int cr = ctxt->modrm_reg;
4069 u64 efer = 0;
4070
4071 static u64 cr_reserved_bits[] = {
4072 0xffffffff00000000ULL,
4073 0, 0, 0, /* CR3 checked later */
4074 CR4_RESERVED_BITS,
4075 0, 0, 0,
4076 CR8_RESERVED_BITS,
4077 };
4078
4079 if (!valid_cr(cr))
4080 return emulate_ud(ctxt);
4081
4082 if (new_val & cr_reserved_bits[cr])
4083 return emulate_gp(ctxt, 0);
4084
4085 switch (cr) {
4086 case 0: {
4087 u64 cr4;
4088 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4089 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4090 return emulate_gp(ctxt, 0);
4091
4092 cr4 = ctxt->ops->get_cr(ctxt, 4);
4093 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4094
4095 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4096 !(cr4 & X86_CR4_PAE))
4097 return emulate_gp(ctxt, 0);
4098
4099 break;
4100 }
4101 case 3: {
4102 u64 rsvd = 0;
4103
4104 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4105 if (efer & EFER_LMA) {
4106 u64 maxphyaddr;
4107 u32 eax, ebx, ecx, edx;
4108
4109 eax = 0x80000008;
4110 ecx = 0;
4111 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4112 &edx, false))
4113 maxphyaddr = eax & 0xff;
4114 else
4115 maxphyaddr = 36;
4116 rsvd = rsvd_bits(maxphyaddr, 62);
4117 }
4118
4119 if (new_val & rsvd)
4120 return emulate_gp(ctxt, 0);
4121
4122 break;
4123 }
4124 case 4: {
4125 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4126
4127 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4128 return emulate_gp(ctxt, 0);
4129
4130 break;
4131 }
4132 }
4133
4134 return X86EMUL_CONTINUE;
4135 }
4136
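/*
 * Worked example for the CR3 check above, assuming CPUID 0x80000008
 * reports MAXPHYADDR == 36: rsvd_bits(36, 62) builds the mask
 * 0x7ffffff000000000, so a long-mode CR3 with any of bits 62:36 set
 * takes the emulate_gp() path.  Bit 63 is deliberately left out of
 * the mask (it is the PCID no-flush bit).
 */
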
4137 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4138 {
4139 unsigned long dr7;
4140
4141 ctxt->ops->get_dr(ctxt, 7, &dr7);
4142
4143 /* Check if DR7.GD (general detect enable, bit 13) is set */
4144 return dr7 & (1 << 13);
4145 }
4146
4147 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4148 {
4149 int dr = ctxt->modrm_reg;
4150 u64 cr4;
4151
4152 if (dr > 7)
4153 return emulate_ud(ctxt);
4154
4155 cr4 = ctxt->ops->get_cr(ctxt, 4);
4156 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4157 return emulate_ud(ctxt);
4158
4159 if (check_dr7_gd(ctxt)) {
4160 ulong dr6;
4161
4162 ctxt->ops->get_dr(ctxt, 6, &dr6);
4163 dr6 &= ~15;
4164 dr6 |= DR6_BD | DR6_RTM;
4165 ctxt->ops->set_dr(ctxt, 6, dr6);
4166 return emulate_db(ctxt);
4167 }
4168
4169 return X86EMUL_CONTINUE;
4170 }
4171
4172 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4173 {
4174 u64 new_val = ctxt->src.val64;
4175 int dr = ctxt->modrm_reg;
4176
4177 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4178 return emulate_gp(ctxt, 0);
4179
4180 return check_dr_read(ctxt);
4181 }
4182
4183 static int check_svme(struct x86_emulate_ctxt *ctxt)
4184 {
4185 u64 efer = 0;
4186
4187 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4188
4189 if (!(efer & EFER_SVME))
4190 return emulate_ud(ctxt);
4191
4192 return X86EMUL_CONTINUE;
4193 }
4194
4195 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4196 {
4197 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4198
4199 /* Valid physical address? (bits 63:48 must be clear) */
4200 if (rax & 0xffff000000000000ULL)
4201 return emulate_gp(ctxt, 0);
4202
4203 return check_svme(ctxt);
4204 }
4205
4206 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4207 {
4208 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4209
4210 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4211 return emulate_ud(ctxt);
4212
4213 return X86EMUL_CONTINUE;
4214 }
4215
4216 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4217 {
4218 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4219 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4220
4221 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4222 ctxt->ops->check_pmc(ctxt, rcx))
4223 return emulate_gp(ctxt, 0);
4224
4225 return X86EMUL_CONTINUE;
4226 }
4227
4228 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4229 {
4230 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4231 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4232 return emulate_gp(ctxt, 0);
4233
4234 return X86EMUL_CONTINUE;
4235 }
4236
4237 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4238 {
4239 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4240 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4241 return emulate_gp(ctxt, 0);
4242
4243 return X86EMUL_CONTINUE;
4244 }
4245
4246 #define D(_y) { .flags = (_y) }
4247 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4248 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4249 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4250 #define N D(NotImpl)
4251 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4252 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4253 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4254 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4255 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4256 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4257 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4258 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4259 #define II(_f, _e, _i) \
4260 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4261 #define IIP(_f, _e, _i, _p) \
4262 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4263 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4264 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4265
4266 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4267 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4268 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4269 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4270 #define I2bvIP(_f, _e, _i, _p) \
4271 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4272
4273 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4274 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4275 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4276
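/*
 * Example expansion (illustrative): F6ALU(Lock, em_add) emits the six
 * classic ALU encodings for opcodes 0x00-0x05 --
 *   add r/m8, r8       F(Lock | ByteOp | DstMem | SrcReg | ModRM, em_add)
 *   add r/m16-32, r    F(Lock | DstMem | SrcReg | ModRM, em_add)
 *   add r8, r/m8       F(ByteOp | DstReg | SrcMem | ModRM, em_add)
 *   add r, r/m16-32    F(DstReg | SrcMem | ModRM, em_add)
 *   add al, imm8       F(ByteOp | DstAcc | SrcImm, em_add)
 *   add ax/eax, imm    F(DstAcc | SrcImm, em_add)
 * Lock is stripped from the register- and accumulator-destination forms.
 */
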
4277 static const struct opcode group7_rm0[] = {
4278 N,
4279 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4280 N, N, N, N, N, N,
4281 };
4282
4283 static const struct opcode group7_rm1[] = {
4284 DI(SrcNone | Priv, monitor),
4285 DI(SrcNone | Priv, mwait),
4286 N, N, N, N, N, N,
4287 };
4288
4289 static const struct opcode group7_rm3[] = {
4290 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4291 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4292 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4293 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4294 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4295 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4296 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4297 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4298 };
4299
4300 static const struct opcode group7_rm7[] = {
4301 N,
4302 DIP(SrcNone, rdtscp, check_rdtsc),
4303 N, N, N, N, N, N,
4304 };
4305
4306 static const struct opcode group1[] = {
4307 F(Lock, em_add),
4308 F(Lock | PageTable, em_or),
4309 F(Lock, em_adc),
4310 F(Lock, em_sbb),
4311 F(Lock | PageTable, em_and),
4312 F(Lock, em_sub),
4313 F(Lock, em_xor),
4314 F(NoWrite, em_cmp),
4315 };
4316
4317 static const struct opcode group1A[] = {
4318 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4319 };
4320
4321 static const struct opcode group2[] = {
4322 F(DstMem | ModRM, em_rol),
4323 F(DstMem | ModRM, em_ror),
4324 F(DstMem | ModRM, em_rcl),
4325 F(DstMem | ModRM, em_rcr),
4326 F(DstMem | ModRM, em_shl),
4327 F(DstMem | ModRM, em_shr),
4328 F(DstMem | ModRM, em_shl), /* ModRM /6: sal, an alias of shl */
4329 F(DstMem | ModRM, em_sar),
4330 };
4331
4332 static const struct opcode group3[] = {
4333 F(DstMem | SrcImm | NoWrite, em_test),
4334 F(DstMem | SrcImm | NoWrite, em_test),
4335 F(DstMem | SrcNone | Lock, em_not),
4336 F(DstMem | SrcNone | Lock, em_neg),
4337 F(DstXacc | Src2Mem, em_mul_ex),
4338 F(DstXacc | Src2Mem, em_imul_ex),
4339 F(DstXacc | Src2Mem, em_div_ex),
4340 F(DstXacc | Src2Mem, em_idiv_ex),
4341 };
4342
4343 static const struct opcode group4[] = {
4344 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4345 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4346 N, N, N, N, N, N,
4347 };
4348
4349 static const struct opcode group5[] = {
4350 F(DstMem | SrcNone | Lock, em_inc),
4351 F(DstMem | SrcNone | Lock, em_dec),
4352 I(SrcMem | NearBranch, em_call_near_abs),
4353 I(SrcMemFAddr | ImplicitOps, em_call_far),
4354 I(SrcMem | NearBranch, em_jmp_abs),
4355 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4356 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4357 };
4358
4359 static const struct opcode group6[] = {
4360 DI(Prot | DstMem, sldt),
4361 DI(Prot | DstMem, str),
4362 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4363 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4364 N, N, N, N,
4365 };
4366
4367 static const struct group_dual group7 = { {
4368 II(Mov | DstMem, em_sgdt, sgdt),
4369 II(Mov | DstMem, em_sidt, sidt),
4370 II(SrcMem | Priv, em_lgdt, lgdt),
4371 II(SrcMem | Priv, em_lidt, lidt),
4372 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4373 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4374 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4375 }, {
4376 EXT(0, group7_rm0),
4377 EXT(0, group7_rm1),
4378 N, EXT(0, group7_rm3),
4379 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4380 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4381 EXT(0, group7_rm7),
4382 } };
4383
4384 static const struct opcode group8[] = {
4385 N, N, N, N,
4386 F(DstMem | SrcImmByte | NoWrite, em_bt),
4387 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4388 F(DstMem | SrcImmByte | Lock, em_btr),
4389 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4390 };
4391
4392 static const struct group_dual group9 = { {
4393 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4394 }, {
4395 N, N, N, N, N, N, N, N,
4396 } };
4397
4398 static const struct opcode group11[] = {
4399 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4400 X7(D(Undefined)),
4401 };
4402
4403 static const struct gprefix pfx_0f_ae_7 = {
4404 I(SrcMem | ByteOp, em_clflush), N, N, N,
4405 };
4406
4407 static const struct group_dual group15 = { {
4408 I(ModRM | Aligned16, em_fxsave),
4409 I(ModRM | Aligned16, em_fxrstor),
4410 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4411 }, {
4412 N, N, N, N, N, N, N, N,
4413 } };
4414
4415 static const struct gprefix pfx_0f_6f_0f_7f = {
4416 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4417 };
4418
4419 static const struct instr_dual instr_dual_0f_2b = {
4420 I(0, em_mov), N
4421 };
4422
4423 static const struct gprefix pfx_0f_2b = {
4424 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4425 };
4426
4427 static const struct gprefix pfx_0f_28_0f_29 = {
4428 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4429 };
4430
4431 static const struct gprefix pfx_0f_e7 = {
4432 N, I(Sse, em_mov), N, N,
4433 };
4434
4435 static const struct escape escape_d9 = { {
4436 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4437 }, {
4438 /* 0xC0 - 0xC7 */
4439 N, N, N, N, N, N, N, N,
4440 /* 0xC8 - 0xCF */
4441 N, N, N, N, N, N, N, N,
4442 /* 0xD0 - 0xD7 */
4443 N, N, N, N, N, N, N, N,
4444 /* 0xD8 - 0xDF */
4445 N, N, N, N, N, N, N, N,
4446 /* 0xE0 - 0xE7 */
4447 N, N, N, N, N, N, N, N,
4448 /* 0xE8 - 0xEF */
4449 N, N, N, N, N, N, N, N,
4450 /* 0xF0 - 0xF7 */
4451 N, N, N, N, N, N, N, N,
4452 /* 0xF8 - 0xFF */
4453 N, N, N, N, N, N, N, N,
4454 } };
4455
4456 static const struct escape escape_db = { {
4457 N, N, N, N, N, N, N, N,
4458 }, {
4459 /* 0xC0 - 0xC7 */
4460 N, N, N, N, N, N, N, N,
4461 /* 0xC8 - 0xCF */
4462 N, N, N, N, N, N, N, N,
4463 /* 0xD0 - 0xD7 */
4464 N, N, N, N, N, N, N, N,
4465 /* 0xD8 - 0xDF */
4466 N, N, N, N, N, N, N, N,
4467 /* 0xE0 - 0xE7 */
4468 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4469 /* 0xE8 - 0xEF */
4470 N, N, N, N, N, N, N, N,
4471 /* 0xF0 - 0xF7 */
4472 N, N, N, N, N, N, N, N,
4473 /* 0xF8 - 0xFF */
4474 N, N, N, N, N, N, N, N,
4475 } };
4476
4477 static const struct escape escape_dd = { {
4478 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4479 }, {
4480 /* 0xC0 - 0xC7 */
4481 N, N, N, N, N, N, N, N,
4482 /* 0xC8 - 0xCF */
4483 N, N, N, N, N, N, N, N,
4484 /* 0xD0 - 0xD7 */
4485 N, N, N, N, N, N, N, N,
4486 /* 0xD8 - 0xDF */
4487 N, N, N, N, N, N, N, N,
4488 /* 0xE0 - 0xE7 */
4489 N, N, N, N, N, N, N, N,
4490 /* 0xE8 - 0xEF */
4491 N, N, N, N, N, N, N, N,
4492 /* 0xF0 - 0xF7 */
4493 N, N, N, N, N, N, N, N,
4494 /* 0xF8 - 0xFF */
4495 N, N, N, N, N, N, N, N,
4496 } };
4497
4498 static const struct instr_dual instr_dual_0f_c3 = {
4499 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4500 };
4501
4502 static const struct mode_dual mode_dual_63 = {
4503 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4504 };
4505
4506 static const struct opcode opcode_table[256] = {
4507 /* 0x00 - 0x07 */
4508 F6ALU(Lock, em_add),
4509 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4510 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4511 /* 0x08 - 0x0F */
4512 F6ALU(Lock | PageTable, em_or),
4513 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4514 N,
4515 /* 0x10 - 0x17 */
4516 F6ALU(Lock, em_adc),
4517 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4518 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4519 /* 0x18 - 0x1F */
4520 F6ALU(Lock, em_sbb),
4521 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4522 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4523 /* 0x20 - 0x27 */
4524 F6ALU(Lock | PageTable, em_and), N, N,
4525 /* 0x28 - 0x2F */
4526 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4527 /* 0x30 - 0x37 */
4528 F6ALU(Lock, em_xor), N, N,
4529 /* 0x38 - 0x3F */
4530 F6ALU(NoWrite, em_cmp), N, N,
4531 /* 0x40 - 0x4F */
4532 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4533 /* 0x50 - 0x57 */
4534 X8(I(SrcReg | Stack, em_push)),
4535 /* 0x58 - 0x5F */
4536 X8(I(DstReg | Stack, em_pop)),
4537 /* 0x60 - 0x67 */
4538 I(ImplicitOps | Stack | No64, em_pusha),
4539 I(ImplicitOps | Stack | No64, em_popa),
4540 N, MD(ModRM, &mode_dual_63),
4541 N, N, N, N,
4542 /* 0x68 - 0x6F */
4543 I(SrcImm | Mov | Stack, em_push),
4544 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4545 I(SrcImmByte | Mov | Stack, em_push),
4546 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4547 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4548 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4549 /* 0x70 - 0x7F */
4550 X16(D(SrcImmByte | NearBranch)),
4551 /* 0x80 - 0x87 */
4552 G(ByteOp | DstMem | SrcImm, group1),
4553 G(DstMem | SrcImm, group1),
4554 G(ByteOp | DstMem | SrcImm | No64, group1),
4555 G(DstMem | SrcImmByte, group1),
4556 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4557 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4558 /* 0x88 - 0x8F */
4559 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4560 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4561 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4562 D(ModRM | SrcMem | NoAccess | DstReg),
4563 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4564 G(0, group1A),
4565 /* 0x90 - 0x97 */
4566 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4567 /* 0x98 - 0x9F */
4568 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4569 I(SrcImmFAddr | No64, em_call_far), N,
4570 II(ImplicitOps | Stack, em_pushf, pushf),
4571 II(ImplicitOps | Stack, em_popf, popf),
4572 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4573 /* 0xA0 - 0xA7 */
4574 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4575 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4576 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4577 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4578 /* 0xA8 - 0xAF */
4579 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4580 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4581 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4582 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4583 /* 0xB0 - 0xB7 */
4584 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4585 /* 0xB8 - 0xBF */
4586 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4587 /* 0xC0 - 0xC7 */
4588 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4589 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4590 I(ImplicitOps | NearBranch, em_ret),
4591 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4592 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4593 G(ByteOp, group11), G(0, group11),
4594 /* 0xC8 - 0xCF */
4595 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4596 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4597 I(ImplicitOps, em_ret_far),
4598 D(ImplicitOps), DI(SrcImmByte, intn),
4599 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4600 /* 0xD0 - 0xD7 */
4601 G(Src2One | ByteOp, group2), G(Src2One, group2),
4602 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4603 I(DstAcc | SrcImmUByte | No64, em_aam),
4604 I(DstAcc | SrcImmUByte | No64, em_aad),
4605 F(DstAcc | ByteOp | No64, em_salc),
4606 I(DstAcc | SrcXLat | ByteOp, em_mov),
4607 /* 0xD8 - 0xDF */
4608 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4609 /* 0xE0 - 0xE7 */
4610 X3(I(SrcImmByte | NearBranch, em_loop)),
4611 I(SrcImmByte | NearBranch, em_jcxz),
4612 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4613 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4614 /* 0xE8 - 0xEF */
4615 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4616 I(SrcImmFAddr | No64, em_jmp_far),
4617 D(SrcImmByte | ImplicitOps | NearBranch),
4618 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4619 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4620 /* 0xF0 - 0xF7 */
4621 N, DI(ImplicitOps, icebp), N, N,
4622 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4623 G(ByteOp, group3), G(0, group3),
4624 /* 0xF8 - 0xFF */
4625 D(ImplicitOps), D(ImplicitOps),
4626 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4627 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4628 };
4629
4630 static const struct opcode twobyte_table[256] = {
4631 /* 0x00 - 0x0F */
4632 G(0, group6), GD(0, &group7), N, N,
4633 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4634 II(ImplicitOps | Priv, em_clts, clts), N,
4635 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4636 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4637 /* 0x10 - 0x1F */
4638 N, N, N, N, N, N, N, N,
4639 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4640 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4641 /* 0x20 - 0x2F */
4642 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4643 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4644 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4645 check_cr_write),
4646 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4647 check_dr_write),
4648 N, N, N, N,
4649 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4650 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4651 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4652 N, N, N, N,
4653 /* 0x30 - 0x3F */
4654 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4655 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4656 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4657 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4658 I(ImplicitOps | EmulateOnUD, em_sysenter),
4659 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4660 N, N,
4661 N, N, N, N, N, N, N, N,
4662 /* 0x40 - 0x4F */
4663 X16(D(DstReg | SrcMem | ModRM)),
4664 /* 0x50 - 0x5F */
4665 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4666 /* 0x60 - 0x6F */
4667 N, N, N, N,
4668 N, N, N, N,
4669 N, N, N, N,
4670 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4671 /* 0x70 - 0x7F */
4672 N, N, N, N,
4673 N, N, N, N,
4674 N, N, N, N,
4675 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4676 /* 0x80 - 0x8F */
4677 X16(D(SrcImm | NearBranch)),
4678 /* 0x90 - 0x9F */
4679 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4680 /* 0xA0 - 0xA7 */
4681 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4682 II(ImplicitOps, em_cpuid, cpuid),
4683 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4684 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4685 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4686 /* 0xA8 - 0xAF */
4687 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4688 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4689 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4690 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4691 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4692 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4693 /* 0xB0 - 0xB7 */
4694 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4695 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4696 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4697 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4698 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4699 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4700 /* 0xB8 - 0xBF */
4701 N, N,
4702 G(BitOp, group8),
4703 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4704 I(DstReg | SrcMem | ModRM, em_bsf_c),
4705 I(DstReg | SrcMem | ModRM, em_bsr_c),
4706 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4707 /* 0xC0 - 0xC7 */
4708 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4709 N, ID(0, &instr_dual_0f_c3),
4710 N, N, N, GD(0, &group9),
4711 /* 0xC8 - 0xCF */
4712 X8(I(DstReg, em_bswap)),
4713 /* 0xD0 - 0xDF */
4714 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4715 /* 0xE0 - 0xEF */
4716 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4717 N, N, N, N, N, N, N, N,
4718 /* 0xF0 - 0xFF */
4719 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4720 };
4721
4722 static const struct instr_dual instr_dual_0f_38_f0 = {
4723 I(DstReg | SrcMem | Mov, em_movbe), N
4724 };
4725
4726 static const struct instr_dual instr_dual_0f_38_f1 = {
4727 I(DstMem | SrcReg | Mov, em_movbe), N
4728 };
4729
4730 static const struct gprefix three_byte_0f_38_f0 = {
4731 ID(0, &instr_dual_0f_38_f0), N, N, N
4732 };
4733
4734 static const struct gprefix three_byte_0f_38_f1 = {
4735 ID(0, &instr_dual_0f_38_f1), N, N, N
4736 };
4737
4738 /*
4739 * Insns below are selected by the mandatory (SIMD) prefix and indexed
4740 * by the third opcode byte.
4741 */
4742 static const struct opcode opcode_map_0f_38[256] = {
4743 /* 0x00 - 0x7f */
4744 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4745 /* 0x80 - 0xef */
4746 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4747 /* 0xf0 - 0xf1 */
4748 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4749 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4750 /* 0xf2 - 0xff */
4751 N, N, X4(N), X8(N)
4752 };
4753
4754 #undef D
4755 #undef N
4756 #undef G
4757 #undef GD
4758 #undef I
4759 #undef GP
4760 #undef EXT
4761 #undef MD
4762 #undef ID
4763
4764 #undef D2bv
4765 #undef D2bvIP
4766 #undef I2bv
4767 #undef I2bvIP
4768 #undef F6ALU
4769
4770 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4771 {
4772 unsigned size;
4773
4774 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4775 if (size == 8)
4776 size = 4;
4777 return size;
4778 }
4779
4780 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4781 unsigned size, bool sign_extension)
4782 {
4783 int rc = X86EMUL_CONTINUE;
4784
4785 op->type = OP_IMM;
4786 op->bytes = size;
4787 op->addr.mem.ea = ctxt->_eip;
4788 /* NB. Immediates are sign-extended as necessary. */
4789 switch (op->bytes) {
4790 case 1:
4791 op->val = insn_fetch(s8, ctxt);
4792 break;
4793 case 2:
4794 op->val = insn_fetch(s16, ctxt);
4795 break;
4796 case 4:
4797 op->val = insn_fetch(s32, ctxt);
4798 break;
4799 case 8:
4800 op->val = insn_fetch(s64, ctxt);
4801 break;
4802 }
4803 if (!sign_extension) {
4804 switch (op->bytes) {
4805 case 1:
4806 op->val &= 0xff;
4807 break;
4808 case 2:
4809 op->val &= 0xffff;
4810 break;
4811 case 4:
4812 op->val &= 0xffffffff;
4813 break;
4814 }
4815 }
4816 done:
4817 return rc;
4818 }
4819
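/*
 * Worked example for imm_size()/decode_imm() above: with REX.W in
 * 64-bit mode op_bytes is 8, but imm_size() clamps the fetch to 4
 * bytes, which decode_imm() then sign-extends to 64 bits -- matching
 * e.g. ADD r/m64, imm32.  Only OpImm64 (the 0xb8..0xbf MOV reg, imm64
 * family) passes the full op_bytes through and fetches 8 bytes.
 */
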
4820 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4821 unsigned d)
4822 {
4823 int rc = X86EMUL_CONTINUE;
4824
4825 switch (d) {
4826 case OpReg:
4827 decode_register_operand(ctxt, op);
4828 break;
4829 case OpImmUByte:
4830 rc = decode_imm(ctxt, op, 1, false);
4831 break;
4832 case OpMem:
4833 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4834 mem_common:
4835 *op = ctxt->memop;
4836 ctxt->memopp = op;
4837 if (ctxt->d & BitOp)
4838 fetch_bit_operand(ctxt);
4839 op->orig_val = op->val;
4840 break;
4841 case OpMem64:
4842 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4843 goto mem_common;
4844 case OpAcc:
4845 op->type = OP_REG;
4846 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4847 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4848 fetch_register_operand(op);
4849 op->orig_val = op->val;
4850 break;
4851 case OpAccLo:
4852 op->type = OP_REG;
4853 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4854 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4855 fetch_register_operand(op);
4856 op->orig_val = op->val;
4857 break;
4858 case OpAccHi:
4859 if (ctxt->d & ByteOp) {
4860 op->type = OP_NONE;
4861 break;
4862 }
4863 op->type = OP_REG;
4864 op->bytes = ctxt->op_bytes;
4865 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4866 fetch_register_operand(op);
4867 op->orig_val = op->val;
4868 break;
4869 case OpDI:
4870 op->type = OP_MEM;
4871 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4872 op->addr.mem.ea =
4873 register_address(ctxt, VCPU_REGS_RDI);
4874 op->addr.mem.seg = VCPU_SREG_ES;
4875 op->val = 0;
4876 op->count = 1;
4877 break;
4878 case OpDX:
4879 op->type = OP_REG;
4880 op->bytes = 2;
4881 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4882 fetch_register_operand(op);
4883 break;
4884 case OpCL:
4885 op->type = OP_IMM;
4886 op->bytes = 1;
4887 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4888 break;
4889 case OpImmByte:
4890 rc = decode_imm(ctxt, op, 1, true);
4891 break;
4892 case OpOne:
4893 op->type = OP_IMM;
4894 op->bytes = 1;
4895 op->val = 1;
4896 break;
4897 case OpImm:
4898 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4899 break;
4900 case OpImm64:
4901 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4902 break;
4903 case OpMem8:
4904 ctxt->memop.bytes = 1;
4905 if (ctxt->memop.type == OP_REG) {
4906 ctxt->memop.addr.reg = decode_register(ctxt,
4907 ctxt->modrm_rm, true);
4908 fetch_register_operand(&ctxt->memop);
4909 }
4910 goto mem_common;
4911 case OpMem16:
4912 ctxt->memop.bytes = 2;
4913 goto mem_common;
4914 case OpMem32:
4915 ctxt->memop.bytes = 4;
4916 goto mem_common;
4917 case OpImmU16:
4918 rc = decode_imm(ctxt, op, 2, false);
4919 break;
4920 case OpImmU:
4921 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4922 break;
4923 case OpSI:
4924 op->type = OP_MEM;
4925 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4926 op->addr.mem.ea =
4927 register_address(ctxt, VCPU_REGS_RSI);
4928 op->addr.mem.seg = ctxt->seg_override;
4929 op->val = 0;
4930 op->count = 1;
4931 break;
4932 case OpXLat:
4933 op->type = OP_MEM;
4934 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4935 op->addr.mem.ea =
4936 address_mask(ctxt,
4937 reg_read(ctxt, VCPU_REGS_RBX) +
4938 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4939 op->addr.mem.seg = ctxt->seg_override;
4940 op->val = 0;
4941 break;
4942 case OpImmFAddr:
4943 op->type = OP_IMM;
4944 op->addr.mem.ea = ctxt->_eip;
4945 op->bytes = ctxt->op_bytes + 2;
4946 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4947 break;
4948 case OpMemFAddr:
4949 ctxt->memop.bytes = ctxt->op_bytes + 2;
4950 goto mem_common;
4951 case OpES:
4952 op->type = OP_IMM;
4953 op->val = VCPU_SREG_ES;
4954 break;
4955 case OpCS:
4956 op->type = OP_IMM;
4957 op->val = VCPU_SREG_CS;
4958 break;
4959 case OpSS:
4960 op->type = OP_IMM;
4961 op->val = VCPU_SREG_SS;
4962 break;
4963 case OpDS:
4964 op->type = OP_IMM;
4965 op->val = VCPU_SREG_DS;
4966 break;
4967 case OpFS:
4968 op->type = OP_IMM;
4969 op->val = VCPU_SREG_FS;
4970 break;
4971 case OpGS:
4972 op->type = OP_IMM;
4973 op->val = VCPU_SREG_GS;
4974 break;
4975 case OpImplicit:
4976 /* Special instructions do their own operand decoding. */
4977 default:
4978 op->type = OP_NONE; /* Disable writeback. */
4979 break;
4980 }
4981
4982 done:
4983 return rc;
4984 }
4985
4986 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4987 {
4988 int rc = X86EMUL_CONTINUE;
4989 int mode = ctxt->mode;
4990 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4991 bool op_prefix = false;
4992 bool has_seg_override = false;
4993 struct opcode opcode;
4994
4995 ctxt->memop.type = OP_NONE;
4996 ctxt->memopp = NULL;
4997 ctxt->_eip = ctxt->eip;
4998 ctxt->fetch.ptr = ctxt->fetch.data;
4999 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5000 ctxt->opcode_len = 1;
5001 if (insn_len > 0)
5002 memcpy(ctxt->fetch.data, insn, insn_len);
5003 else {
5004 rc = __do_insn_fetch_bytes(ctxt, 1);
5005 if (rc != X86EMUL_CONTINUE)
5006 return rc;
5007 }
5008
5009 switch (mode) {
5010 case X86EMUL_MODE_REAL:
5011 case X86EMUL_MODE_VM86:
5012 case X86EMUL_MODE_PROT16:
5013 def_op_bytes = def_ad_bytes = 2;
5014 break;
5015 case X86EMUL_MODE_PROT32:
5016 def_op_bytes = def_ad_bytes = 4;
5017 break;
5018 #ifdef CONFIG_X86_64
5019 case X86EMUL_MODE_PROT64:
5020 def_op_bytes = 4;
5021 def_ad_bytes = 8;
5022 break;
5023 #endif
5024 default:
5025 return EMULATION_FAILED;
5026 }
5027
5028 ctxt->op_bytes = def_op_bytes;
5029 ctxt->ad_bytes = def_ad_bytes;
5030
5031 /* Legacy prefixes. */
5032 for (;;) {
5033 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5034 case 0x66: /* operand-size override */
5035 op_prefix = true;
5036 /* switch between 2/4 bytes */
5037 ctxt->op_bytes = def_op_bytes ^ 6;
5038 break;
5039 case 0x67: /* address-size override */
5040 if (mode == X86EMUL_MODE_PROT64)
5041 /* switch between 4/8 bytes */
5042 ctxt->ad_bytes = def_ad_bytes ^ 12;
5043 else
5044 /* switch between 2/4 bytes */
5045 ctxt->ad_bytes = def_ad_bytes ^ 6;
5046 break;
5047 case 0x26: /* ES override */
5048 case 0x2e: /* CS override */
5049 case 0x36: /* SS override */
5050 case 0x3e: /* DS override */
5051 has_seg_override = true;
5052 ctxt->seg_override = (ctxt->b >> 3) & 3;
5053 break;
5054 case 0x64: /* FS override */
5055 case 0x65: /* GS override */
5056 has_seg_override = true;
5057 ctxt->seg_override = ctxt->b & 7;
5058 break;
5059 case 0x40 ... 0x4f: /* REX */
5060 if (mode != X86EMUL_MODE_PROT64)
5061 goto done_prefixes;
5062 ctxt->rex_prefix = ctxt->b;
5063 continue;
5064 case 0xf0: /* LOCK */
5065 ctxt->lock_prefix = 1;
5066 break;
5067 case 0xf2: /* REPNE/REPNZ */
5068 case 0xf3: /* REP/REPE/REPZ */
5069 ctxt->rep_prefix = ctxt->b;
5070 break;
5071 default:
5072 goto done_prefixes;
5073 }
5074
5075 /* Any legacy prefix after a REX prefix nullifies its effect. */
5076
5077 ctxt->rex_prefix = 0;
5078 }
5079
5080 done_prefixes:
5081
5082 /* REX prefix. */
5083 if (ctxt->rex_prefix & 8)
5084 ctxt->op_bytes = 8; /* REX.W */
5085
5086 /* Opcode byte(s). */
5087 opcode = opcode_table[ctxt->b];
5088 /* Two-byte opcode? */
5089 if (ctxt->b == 0x0f) {
5090 ctxt->opcode_len = 2;
5091 ctxt->b = insn_fetch(u8, ctxt);
5092 opcode = twobyte_table[ctxt->b];
5093
5094 /* 0F_38 opcode map */
5095 if (ctxt->b == 0x38) {
5096 ctxt->opcode_len = 3;
5097 ctxt->b = insn_fetch(u8, ctxt);
5098 opcode = opcode_map_0f_38[ctxt->b];
5099 }
5100 }
5101 ctxt->d = opcode.flags;
5102
5103 if (ctxt->d & ModRM)
5104 ctxt->modrm = insn_fetch(u8, ctxt);
5105
5106 /* VEX-prefixed instructions are not implemented */
5107 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5108 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5109 ctxt->d = NotImpl;
5110 }
5111
5112 while (ctxt->d & GroupMask) {
5113 switch (ctxt->d & GroupMask) {
5114 case Group:
5115 goffset = (ctxt->modrm >> 3) & 7;
5116 opcode = opcode.u.group[goffset];
5117 break;
5118 case GroupDual:
5119 goffset = (ctxt->modrm >> 3) & 7;
5120 if ((ctxt->modrm >> 6) == 3)
5121 opcode = opcode.u.gdual->mod3[goffset];
5122 else
5123 opcode = opcode.u.gdual->mod012[goffset];
5124 break;
5125 case RMExt:
5126 goffset = ctxt->modrm & 7;
5127 opcode = opcode.u.group[goffset];
5128 break;
5129 case Prefix:
5130 if (ctxt->rep_prefix && op_prefix)
5131 return EMULATION_FAILED;
5132 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5133 switch (simd_prefix) {
5134 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5135 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5136 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5137 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5138 }
5139 break;
5140 case Escape:
5141 if (ctxt->modrm > 0xbf)
5142 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5143 else
5144 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5145 break;
5146 case InstrDual:
5147 if ((ctxt->modrm >> 6) == 3)
5148 opcode = opcode.u.idual->mod3;
5149 else
5150 opcode = opcode.u.idual->mod012;
5151 break;
5152 case ModeDual:
5153 if (ctxt->mode == X86EMUL_MODE_PROT64)
5154 opcode = opcode.u.mdual->mode64;
5155 else
5156 opcode = opcode.u.mdual->mode32;
5157 break;
5158 default:
5159 return EMULATION_FAILED;
5160 }
5161
5162 ctxt->d &= ~(u64)GroupMask;
5163 ctxt->d |= opcode.flags;
5164 }
5165
5166 /* Unrecognised? */
5167 if (ctxt->d == 0)
5168 return EMULATION_FAILED;
5169
5170 ctxt->execute = opcode.u.execute;
5171
5172 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5173 return EMULATION_FAILED;
5174
5175 if (unlikely(ctxt->d &
5176 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5177 No16))) {
5178 /*
5179 * These are copied unconditionally here, and checked unconditionally
5180 * in x86_emulate_insn.
5181 */
5182 ctxt->check_perm = opcode.check_perm;
5183 ctxt->intercept = opcode.intercept;
5184
5185 if (ctxt->d & NotImpl)
5186 return EMULATION_FAILED;
5187
5188 if (mode == X86EMUL_MODE_PROT64) {
5189 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5190 ctxt->op_bytes = 8;
5191 else if (ctxt->d & NearBranch)
5192 ctxt->op_bytes = 8;
5193 }
5194
5195 if (ctxt->d & Op3264) {
5196 if (mode == X86EMUL_MODE_PROT64)
5197 ctxt->op_bytes = 8;
5198 else
5199 ctxt->op_bytes = 4;
5200 }
5201
5202 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5203 ctxt->op_bytes = 4;
5204
5205 if (ctxt->d & Sse)
5206 ctxt->op_bytes = 16;
5207 else if (ctxt->d & Mmx)
5208 ctxt->op_bytes = 8;
5209 }
5210
5211 /* ModRM and SIB bytes. */
5212 if (ctxt->d & ModRM) {
5213 rc = decode_modrm(ctxt, &ctxt->memop);
5214 if (!has_seg_override) {
5215 has_seg_override = true;
5216 ctxt->seg_override = ctxt->modrm_seg;
5217 }
5218 } else if (ctxt->d & MemAbs)
5219 rc = decode_abs(ctxt, &ctxt->memop);
5220 if (rc != X86EMUL_CONTINUE)
5221 goto done;
5222
5223 if (!has_seg_override)
5224 ctxt->seg_override = VCPU_SREG_DS;
5225
5226 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5227
5228 /*
5229 * Decode and fetch the source operand: register, memory
5230 * or immediate.
5231 */
5232 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5233 if (rc != X86EMUL_CONTINUE)
5234 goto done;
5235
5236 /*
5237 * Decode and fetch the second source operand: register, memory
5238 * or immediate.
5239 */
5240 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5241 if (rc != X86EMUL_CONTINUE)
5242 goto done;
5243
5244 /* Decode and fetch the destination operand: register or memory. */
5245 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5246
5247 if (ctxt->rip_relative && likely(ctxt->memopp))
5248 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5249 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5250
5251 done:
5252 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5253 }
5254
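/*
 * Worked example of the prefix arithmetic in x86_decode_insn() above:
 * in 32-bit protected mode def_op_bytes == 4, so an 0x66 prefix yields
 * op_bytes == 4 ^ 6 == 2; in 64-bit mode def_ad_bytes == 8, so an 0x67
 * prefix yields ad_bytes == 8 ^ 12 == 4.  A REX prefix with REX.W set
 * (0x48-0x4f) then forces op_bytes to 8 after the prefix loop.
 */
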
5255 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5256 {
5257 return ctxt->d & PageTable;
5258 }
5259
5260 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5261 {
5262 /*
5263 * The second termination condition only applies to REPE and REPNE.
5264 * If the repeat string operation prefix is REPE/REPZ or REPNE/REPNZ,
5265 * check the corresponding termination condition:
5266 * - if REPE/REPZ and ZF = 0 then done
5267 * - if REPNE/REPNZ and ZF = 1 then done
5268 */
5269 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5270 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5271 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5272 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5273 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5274 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5275 return true;
5276
5277 return false;
5278 }
5279
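/*
 * Example for string_insn_completed() above: "repe cmpsb" (f3 a6)
 * terminates either when RCX reaches zero (the first condition,
 * checked in x86_emulate_insn()) or as soon as a comparison clears
 * ZF; "repne scasb" (f2 ae) terminates once a match sets ZF.
 */
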
5280 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5281 {
5282 int rc;
5283
5284 ctxt->ops->get_fpu(ctxt);
5285 rc = asm_safe("fwait");
5286 ctxt->ops->put_fpu(ctxt);
5287
5288 if (unlikely(rc != X86EMUL_CONTINUE))
5289 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5290
5291 return X86EMUL_CONTINUE;
5292 }
5293
5294 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5295 struct operand *op)
5296 {
5297 if (op->type == OP_MM)
5298 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5299 }
5300
5301 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5302 {
5303 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5304
5305 if (!(ctxt->d & ByteOp))
5306 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5307
5308 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
5309 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5310 [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
5311 : "c"(ctxt->src2.val));
5312
5313 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5314 if (!fop) /* exception is returned in fop variable */
5315 return emulate_de(ctxt);
5316 return X86EMUL_CONTINUE;
5317 }
5318
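/*
 * Layout assumed by fastop() above, per the FASTOP stub-generation
 * macros earlier in this file: each fastop table packs four stubs
 * (byte, word, dword, qword operands) FASTOP_SIZE bytes apart, so for
 * a 4-byte destination __ffs(4) == 2 selects the third stub.  The asm
 * passes dst in rAX, src in rDX, src2 in rCX and the scratch flags in
 * rDI, with the stub pointer itself in rSI.
 */
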
5319 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5320 {
5321 memset(&ctxt->rip_relative, 0,
5322 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5323
5324 ctxt->io_read.pos = 0;
5325 ctxt->io_read.end = 0;
5326 ctxt->mem_read.end = 0;
5327 }
5328
5329 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5330 {
5331 const struct x86_emulate_ops *ops = ctxt->ops;
5332 int rc = X86EMUL_CONTINUE;
5333 int saved_dst_type = ctxt->dst.type;
5334 unsigned emul_flags;
5335
5336 ctxt->mem_read.pos = 0;
5337
5338 /* LOCK prefix is allowed only with some instructions */
5339 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5340 rc = emulate_ud(ctxt);
5341 goto done;
5342 }
5343
5344 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5345 rc = emulate_ud(ctxt);
5346 goto done;
5347 }
5348
5349 emul_flags = ctxt->ops->get_hflags(ctxt);
5350 if (unlikely(ctxt->d &
5351 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5352 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5353 (ctxt->d & Undefined)) {
5354 rc = emulate_ud(ctxt);
5355 goto done;
5356 }
5357
5358 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5359 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5360 rc = emulate_ud(ctxt);
5361 goto done;
5362 }
5363
5364 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5365 rc = emulate_nm(ctxt);
5366 goto done;
5367 }
5368
5369 if (ctxt->d & Mmx) {
5370 rc = flush_pending_x87_faults(ctxt);
5371 if (rc != X86EMUL_CONTINUE)
5372 goto done;
5373 /*
5374 * Now that we know the fpu is exception safe, we can fetch
5375 * operands from it.
5376 */
5377 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5378 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5379 if (!(ctxt->d & Mov))
5380 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5381 }
5382
5383 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5384 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5385 X86_ICPT_PRE_EXCEPT);
5386 if (rc != X86EMUL_CONTINUE)
5387 goto done;
5388 }
5389
5390 /* Instruction can only be executed in protected mode */
5391 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5392 rc = emulate_ud(ctxt);
5393 goto done;
5394 }
5395
5396 /* Privileged instruction can be executed only in CPL=0 */
5397 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5398 if (ctxt->d & PrivUD)
5399 rc = emulate_ud(ctxt);
5400 else
5401 rc = emulate_gp(ctxt, 0);
5402 goto done;
5403 }
5404
5405 /* Do instruction specific permission checks */
5406 if (ctxt->d & CheckPerm) {
5407 rc = ctxt->check_perm(ctxt);
5408 if (rc != X86EMUL_CONTINUE)
5409 goto done;
5410 }
5411
5412 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5413 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5414 X86_ICPT_POST_EXCEPT);
5415 if (rc != X86EMUL_CONTINUE)
5416 goto done;
5417 }
5418
5419 if (ctxt->rep_prefix && (ctxt->d & String)) {
5420 /* All REP prefixes have the same first termination condition */
5421 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5422 string_registers_quirk(ctxt);
5423 ctxt->eip = ctxt->_eip;
5424 ctxt->eflags &= ~X86_EFLAGS_RF;
5425 goto done;
5426 }
5427 }
5428 }
5429
5430 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5431 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5432 ctxt->src.valptr, ctxt->src.bytes);
5433 if (rc != X86EMUL_CONTINUE)
5434 goto done;
5435 ctxt->src.orig_val64 = ctxt->src.val64;
5436 }
5437
5438 if (ctxt->src2.type == OP_MEM) {
5439 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5440 &ctxt->src2.val, ctxt->src2.bytes);
5441 if (rc != X86EMUL_CONTINUE)
5442 goto done;
5443 }
5444
5445 if ((ctxt->d & DstMask) == ImplicitOps)
5446 goto special_insn;
5447
5448
5449 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5450 /* optimisation - avoid slow emulated read if Mov */
5451 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5452 &ctxt->dst.val, ctxt->dst.bytes);
5453 if (rc != X86EMUL_CONTINUE) {
5454 if (!(ctxt->d & NoWrite) &&
5455 rc == X86EMUL_PROPAGATE_FAULT &&
5456 ctxt->exception.vector == PF_VECTOR)
5457 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5458 goto done;
5459 }
5460 }
5461 /* Copy full 64-bit value for CMPXCHG8B. */
5462 ctxt->dst.orig_val64 = ctxt->dst.val64;
5463
5464 special_insn:
5465
5466 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5467 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5468 X86_ICPT_POST_MEMACCESS);
5469 if (rc != X86EMUL_CONTINUE)
5470 goto done;
5471 }
5472
5473 if (ctxt->rep_prefix && (ctxt->d & String))
5474 ctxt->eflags |= X86_EFLAGS_RF;
5475 else
5476 ctxt->eflags &= ~X86_EFLAGS_RF;
5477
5478 if (ctxt->execute) {
5479 if (ctxt->d & Fastop) {
5480 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5481 rc = fastop(ctxt, fop);
5482 if (rc != X86EMUL_CONTINUE)
5483 goto done;
5484 goto writeback;
5485 }
5486 rc = ctxt->execute(ctxt);
5487 if (rc != X86EMUL_CONTINUE)
5488 goto done;
5489 goto writeback;
5490 }
5491
5492 if (ctxt->opcode_len == 2)
5493 goto twobyte_insn;
5494 else if (ctxt->opcode_len == 3)
5495 goto threebyte_insn;
5496
5497 switch (ctxt->b) {
5498 case 0x70 ... 0x7f: /* jcc (short) */
5499 if (test_cc(ctxt->b, ctxt->eflags))
5500 rc = jmp_rel(ctxt, ctxt->src.val);
5501 break;
5502 case 0x8d: /* lea r16/r32, m */
5503 ctxt->dst.val = ctxt->src.addr.mem.ea;
5504 break;
5505 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5506 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5507 ctxt->dst.type = OP_NONE;
5508 else
5509 rc = em_xchg(ctxt);
5510 break;
5511 case 0x98: /* cbw/cwde/cdqe */
5512 switch (ctxt->op_bytes) {
5513 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5514 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5515 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5516 }
5517 break;
5518 case 0xcc: /* int3 */
5519 rc = emulate_int(ctxt, 3);
5520 break;
5521 case 0xcd: /* int n */
5522 rc = emulate_int(ctxt, ctxt->src.val);
5523 break;
5524 case 0xce: /* into */
5525 if (ctxt->eflags & X86_EFLAGS_OF)
5526 rc = emulate_int(ctxt, 4);
5527 break;
5528 case 0xe9: /* jmp rel */
5529 case 0xeb: /* jmp rel short */
5530 rc = jmp_rel(ctxt, ctxt->src.val);
5531 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5532 break;
5533 case 0xf4: /* hlt */
5534 ctxt->ops->halt(ctxt);
5535 break;
5536 case 0xf5: /* cmc */
5537 /* complement carry flag from eflags reg */
5538 ctxt->eflags ^= X86_EFLAGS_CF;
5539 break;
5540 case 0xf8: /* clc */
5541 ctxt->eflags &= ~X86_EFLAGS_CF;
5542 break;
5543 case 0xf9: /* stc */
5544 ctxt->eflags |= X86_EFLAGS_CF;
5545 break;
5546 case 0xfc: /* cld */
5547 ctxt->eflags &= ~X86_EFLAGS_DF;
5548 break;
5549 case 0xfd: /* std */
5550 ctxt->eflags |= X86_EFLAGS_DF;
5551 break;
5552 default:
5553 goto cannot_emulate;
5554 }
5555
5556 if (rc != X86EMUL_CONTINUE)
5557 goto done;
5558
5559 writeback:
5560 if (ctxt->d & SrcWrite) {
5561 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5562 rc = writeback(ctxt, &ctxt->src);
5563 if (rc != X86EMUL_CONTINUE)
5564 goto done;
5565 }
5566 if (!(ctxt->d & NoWrite)) {
5567 rc = writeback(ctxt, &ctxt->dst);
5568 if (rc != X86EMUL_CONTINUE)
5569 goto done;
5570 }
5571
5572 /*
5573 * restore dst type in case the decoding will be reused
5574 * (happens for string instructions)
5575 */
5576 ctxt->dst.type = saved_dst_type;
5577
5578 if ((ctxt->d & SrcMask) == SrcSI)
5579 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5580
5581 if ((ctxt->d & DstMask) == DstDI)
5582 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5583
5584 if (ctxt->rep_prefix && (ctxt->d & String)) {
5585 unsigned int count;
5586 struct read_cache *r = &ctxt->io_read;
5587 if ((ctxt->d & SrcMask) == SrcSI)
5588 count = ctxt->src.count;
5589 else
5590 count = ctxt->dst.count;
5591 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5592
5593 if (!string_insn_completed(ctxt)) {
5594 /*
5595 * Re-enter the guest when the pio read-ahead buffer is empty
5596 * or, if it is not used, after every 1024 iterations.
5597 */
5598 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5599 (r->end == 0 || r->end != r->pos)) {
5600 /*
5601 * Reset read cache. Usually happens before
5602 * decode, but since instruction is restarted
5603 * we have to do it here.
5604 */
5605 ctxt->mem_read.end = 0;
5606 writeback_registers(ctxt);
5607 return EMULATION_RESTART;
5608 }
5609 goto done; /* skip rip writeback */
5610 }
5611 ctxt->eflags &= ~X86_EFLAGS_RF;
5612 }
5613
5614 ctxt->eip = ctxt->_eip;
5615
5616 done:
5617 if (rc == X86EMUL_PROPAGATE_FAULT) {
5618 WARN_ON(ctxt->exception.vector > 0x1f);
5619 ctxt->have_exception = true;
5620 }
5621 if (rc == X86EMUL_INTERCEPTED)
5622 return EMULATION_INTERCEPTED;
5623
5624 if (rc == X86EMUL_CONTINUE)
5625 writeback_registers(ctxt);
5626
5627 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5628
5629 twobyte_insn:
5630 switch (ctxt->b) {
5631 case 0x09: /* wbinvd */
5632 (ctxt->ops->wbinvd)(ctxt);
5633 break;
5634 case 0x08: /* invd */
5635 case 0x0d: /* GrpP (prefetch) */
5636 case 0x18: /* Grp16 (prefetch/nop) */
5637 case 0x1f: /* nop */
5638 break;
5639 case 0x20: /* mov cr, reg */
5640 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5641 break;
5642 case 0x21: /* mov from dr to reg */
5643 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5644 break;
5645 case 0x40 ... 0x4f: /* cmov */
5646 if (test_cc(ctxt->b, ctxt->eflags))
5647 ctxt->dst.val = ctxt->src.val;
5648 else if (ctxt->op_bytes != 4)
5649 ctxt->dst.type = OP_NONE; /* no writeback */
5650 break;
5651 case 0x80 ... 0x8f: /* jnz rel, etc*/
5652 if (test_cc(ctxt->b, ctxt->eflags))
5653 rc = jmp_rel(ctxt, ctxt->src.val);
5654 break;
5655 case 0x90 ... 0x9f: /* setcc r/m8 */
5656 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5657 break;
5658 case 0xb6 ... 0xb7: /* movzx */
5659 ctxt->dst.bytes = ctxt->op_bytes;
5660 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5661 : (u16) ctxt->src.val;
5662 break;
5663 case 0xbe ... 0xbf: /* movsx */
5664 ctxt->dst.bytes = ctxt->op_bytes;
5665 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5666 (s16) ctxt->src.val;
5667 break;
5668 default:
5669 goto cannot_emulate;
5670 }
5671
5672 threebyte_insn:
5673
5674 if (rc != X86EMUL_CONTINUE)
5675 goto done;
5676
5677 goto writeback;
5678
5679 cannot_emulate:
5680 return EMULATION_FAILED;
5681 }
5682
5683 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5684 {
5685 invalidate_registers(ctxt);
5686 }
5687
5688 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5689 {
5690 writeback_registers(ctxt);
5691 }
5692
5693 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5694 {
5695 if (ctxt->rep_prefix && (ctxt->d & String))
5696 return false;
5697
5698 if (ctxt->d & TwoMemOp)
5699 return false;
5700
5701 return true;
5702 }