/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
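
/*
 * Illustrative sketch (not part of the original file): each opcode's u64
 * decode flags pack several 5-bit operand descriptors at fixed shifts
 * (DstShift, SrcShift and Src2Shift, defined below).  Pulling one back out
 * is just a shift and mask:
 *
 *	u64 flags = ctxt->d;
 *	unsigned dst_type  = (flags >> DstShift)  & OpMask;
 *	unsigned src_type  = (flags >> SrcShift)  & OpMask;
 *	unsigned src2_type = (flags >> Src2Shift) & OpMask;
 *
 * each result being one of the Op* values above.  The decoder below
 * compares against the pre-shifted forms instead, e.g.
 * (ctxt->d & DstMask) == DstReg, which is the same test.
 */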

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)	/* Memory operand is absolute displacement */
#define String      (1<<13)	/* String instruction (rep capable) */
#define Stack       (1<<14)	/* Stack instruction (push/pop) */
#define GroupMask   (7<<15)	/* Opcode uses one of the group mechanisms */
#define Group       (1<<15)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)	/* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)	/* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)	/* Escape to coprocessor instruction */
#define Sse         (1<<18)	/* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)	/* Emulate if unsupported by the host */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)	/* instruction used to write page table */
#define NotImpl     (1 << 30)	/* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)	/* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)	/* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)	/* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)	/* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)	/* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)	/* No writeback */
#define SrcWrite    ((u64)1 << 46)	/* Write back src operand */
#define NoMod       ((u64)1 << 47)	/* Mod field is ignored */
#define Intercept   ((u64)1 << 48)	/* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)	/* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)	/* No big real mode */
#define PrivUD      ((u64)1 << 51)	/* #UD instead of #GP on CPL > 0 */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
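
/*
 * Illustrative note (not in the original file): the X* macros replicate
 * their argument list N times, so a whole row of an opcode table can be
 * declared at once.  For instance, with the table-building macros defined
 * further down in this file, the sixteen Jcc entries reduce to
 *
 *	X16(D(SrcImmByte))
 *
 * which expands to sixteen copies of D(SrcImmByte).
 */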

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
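
/*
 * Illustrative sketch (not the literal dispatch code, which lives later in
 * this file): because the FASTOP* macros below emit the b/w/l/q bodies back
 * to back, each FASTOP_SIZE bytes apart, picking the right-sized entry is
 * simple pointer arithmetic (GNU C permits arithmetic on function pointers):
 *
 *	void (*fop)(struct fastop *) = em_add;	// 8-bit entry point
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * __ffs(2)/__ffs(4)/__ffs(8) give 1/2/3, selecting the 16/32/64-bit stub.
 */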

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
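
/*
 * Usage sketch (illustrative, not in the original file): guest GPRs are
 * read lazily into _regs[] and only dirty ones are flushed back, so a
 * typical read-modify-write through the cache looks like:
 *
 *	invalidate_registers(ctxt);		// drop any stale copies
 *	*reg_rmw(ctxt, VCPU_REGS_RAX) += 1;	// load, mark dirty, modify
 *	writeback_registers(ctxt);		// write back dirty regs only
 */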

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END
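
/*
 * For example (illustrative), FASTOP1(not) emits four stubs, each aligned
 * to and padded to FASTOP_SIZE (8) bytes:
 *
 *	em_not + 0x00:	notb %al  ; ret
 *	em_not + 0x08:	notw %ax  ; ret
 *	em_not + 0x10:	notl %eax ; ret
 *	em_not + 0x18:	notq %rax ; ret		(64-bit kernels only)
 */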

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
				 int cs_l)
{
	switch (ctxt->op_bytes) {
	case 2:
		ctxt->_eip = (u16)dst;
		break;
	case 4:
		ctxt->_eip = (u32)dst;
		break;
	case 8:
		if ((cs_l && is_noncanonical_address(dst)) ||
		    (!cs_l && (dst & ~(u32)-1)))
			return emulate_gp(ctxt, 0);
		ctxt->_eip = dst;
		break;
	default:
		WARN(1, "unsupported eip assignment size\n");
	}
	return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static int __linearize(struct x86_emulate_ctxt *ctxt,
		       struct segmented_address addr,
		       unsigned size, bool write, bool fetch,
		       ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
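		/*
		 * (Added note: a 64-bit linear address must be canonical,
		 * i.e. bits 63:48 must be copies of bit 47.  The shift pair
		 * below checks exactly that sign-extension.)
		 */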
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
		    (ctxt->d & NoBigReal)) {
			/* la is between zero and 0xffff */
			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
				goto bad;
		} else if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
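
/*
 * Example (illustrative): for an expand-down data segment with limit
 * 0x0fff and d=1, the valid offsets are 0x1000..0xffffffff.  An access at
 * addr.ea == 0x0800 therefore takes the "goto bad" path above, while one
 * at addr.ea == 0x2000 passes both limit checks.
 */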

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}


static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

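	/*
	 * (Added note: since 0 <= cur_size <= 15, XOR-ing with 15 below is
	 * the same as 15 - cur_size, i.e. the bytes left of a maximum-length
	 * 15-byte instruction.)
	 */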
	size = 15UL ^ cur_size;
	rc = __linearize(ctxt, addr, size, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return X86EMUL_UNHANDLEABLE;
	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
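
/*
 * Usage sketch (illustrative): both macros expect an "int rc" and a
 * "done:" label in the enclosing function, which they bail out through on
 * fetch failure.  Inside the decoder they are used like:
 *
 *	u8 b     = insn_fetch(u8, ctxt);	// next opcode byte
 *	s32 disp = insn_fetch(s32, ctxt);	// 32-bit displacement
 */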

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop: if set and there is no REX prefix, modrm_reg values 4..7 select
 * the legacy high-byte registers AH, CH, DH and BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
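
/*
 * Illustrative note: FOP_SETCC above packs one 4-byte "setCC %al; ret"
 * stub per condition code, in opcode order, so condition number cc lives
 * at em_setcc + 4*cc.  For instance, test_cc(0x4, flags) calls the "setz"
 * stub and returns 1 exactly when ZF is set in flags.
 */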

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
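
/*
 * Example (illustrative): for "bt %eax, (%rbx)" with a 4-byte operand and
 * eax == 100: mask = ~31, so sv = 96 and the effective address above is
 * advanced by 96/8 = 12 bytes, leaving bit offset 100 & 31 = 4 in src.val.
 */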

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}
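
/*
 * Selector layout reminder (illustrative): bits 15:3 index the descriptor
 * table, bit 2 (TI) picks the LDT over the GDT, and bits 1:0 are the RPL.
 * E.g. selector 0x002b is GDT index 5 at RPL 3, while 0x0014 is LDT
 * index 2 at RPL 0 and takes the LDTR branch above.
 */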

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}
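
/*
 * Example (illustrative): with op->bytes == 4 in long mode, writing
 * 0x12345678 to RAX leaves RAX == 0x0000000012345678, because the u32
 * cast is stored through the full-width pointer; the 1- and 2-byte cases
 * instead preserve the untouched high bytes of the register.
 */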

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}
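
/*
 * Real-mode IVT layout (illustrative): entry i is four bytes at linear
 * address 4*i, low word = IP, high word = CS.  So for "int $0x10" the
 * code above reads the new IP from 0x40 and the new CS from 0x42.
 */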
1899
1900 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1901 {
1902 int rc;
1903
1904 invalidate_registers(ctxt);
1905 rc = __emulate_int_real(ctxt, irq);
1906 if (rc == X86EMUL_CONTINUE)
1907 writeback_registers(ctxt);
1908 return rc;
1909 }
1910
1911 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1912 {
1913 switch(ctxt->mode) {
1914 case X86EMUL_MODE_REAL:
1915 return __emulate_int_real(ctxt, irq);
1916 case X86EMUL_MODE_VM86:
1917 case X86EMUL_MODE_PROT16:
1918 case X86EMUL_MODE_PROT32:
1919 case X86EMUL_MODE_PROT64:
1920 default:
1921 /* Protected mode interrupts unimplemented yet */
1922 return X86EMUL_UNHANDLEABLE;
1923 }
1924 }
1925
1926 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1927 {
1928 int rc = X86EMUL_CONTINUE;
1929 unsigned long temp_eip = 0;
1930 unsigned long temp_eflags = 0;
1931 unsigned long cs = 0;
1932 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1933 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1934 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1935 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1936
1937 /* TODO: Add stack limit check */
1938
1939 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1940
1941 if (rc != X86EMUL_CONTINUE)
1942 return rc;
1943
1944 if (temp_eip & ~0xffff)
1945 return emulate_gp(ctxt, 0);
1946
1947 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1948
1949 if (rc != X86EMUL_CONTINUE)
1950 return rc;
1951
1952 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1953
1954 if (rc != X86EMUL_CONTINUE)
1955 return rc;
1956
1957 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1958
1959 if (rc != X86EMUL_CONTINUE)
1960 return rc;
1961
1962 ctxt->_eip = temp_eip;
1963
1964
1965 if (ctxt->op_bytes == 4)
1966 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1967 else if (ctxt->op_bytes == 2) {
1968 ctxt->eflags &= ~0xffff;
1969 ctxt->eflags |= temp_eflags;
1970 }
1971
1972 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1973 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1974
1975 return rc;
1976 }
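
/*
 * A concrete view of the masking above: the arithmetic, trap, direction
 * and IOPL bits (plus the always-one bit 1) come from the EFLAGS image
 * popped off the stack, while VM, VIF and VIP are preserved from the
 * current EFLAGS, since a real-mode IRET must not toggle the vm86
 * machinery:
 *
 *	eflags = (popped & mask) | (eflags & vm86_mask);
 *
 * With a 16-bit operand size only the low word is replaced, which is
 * what the op_bytes == 2 branch implements.
 */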
1977
1978 static int em_iret(struct x86_emulate_ctxt *ctxt)
1979 {
1980 switch(ctxt->mode) {
1981 case X86EMUL_MODE_REAL:
1982 return emulate_iret_real(ctxt);
1983 case X86EMUL_MODE_VM86:
1984 case X86EMUL_MODE_PROT16:
1985 case X86EMUL_MODE_PROT32:
1986 case X86EMUL_MODE_PROT64:
1987 default:
1988 /* iret from protected mode is not implemented yet */
1989 return X86EMUL_UNHANDLEABLE;
1990 }
1991 }
1992
1993 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1994 {
1995 int rc;
1996 unsigned short sel, old_sel;
1997 struct desc_struct old_desc, new_desc;
1998 const struct x86_emulate_ops *ops = ctxt->ops;
1999 u8 cpl = ctxt->ops->cpl(ctxt);
2000
2001 /* Assignment of RIP may only fail in 64-bit mode */
2002 if (ctxt->mode == X86EMUL_MODE_PROT64)
2003 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2004 VCPU_SREG_CS);
2005
2006 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2007
2008 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2009 &new_desc);
2010 if (rc != X86EMUL_CONTINUE)
2011 return rc;
2012
2013 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2014 if (rc != X86EMUL_CONTINUE) {
2015 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2016 /* assigning eip failed; restore the old cs */
2017 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2018 return rc;
2019 }
2020 return rc;
2021 }
2022
2023 static int em_grp45(struct x86_emulate_ctxt *ctxt)
2024 {
2025 int rc = X86EMUL_CONTINUE;
2026
2027 switch (ctxt->modrm_reg) {
2028 case 2: /* call near abs */ {
2029 long int old_eip;
2030 old_eip = ctxt->_eip;
2031 rc = assign_eip_near(ctxt, ctxt->src.val);
2032 if (rc != X86EMUL_CONTINUE)
2033 break;
2034 ctxt->src.val = old_eip;
2035 rc = em_push(ctxt);
2036 break;
2037 }
2038 case 4: /* jmp abs */
2039 rc = assign_eip_near(ctxt, ctxt->src.val);
2040 break;
2041 case 5: /* jmp far */
2042 rc = em_jmp_far(ctxt);
2043 break;
2044 case 6: /* push */
2045 rc = em_push(ctxt);
2046 break;
2047 }
2048 return rc;
2049 }
2050
2051 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2052 {
2053 u64 old = ctxt->dst.orig_val64;
2054
2055 if (ctxt->dst.bytes == 16)
2056 return X86EMUL_UNHANDLEABLE;
2057
2058 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2059 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2060 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2061 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2062 ctxt->eflags &= ~EFLG_ZF;
2063 } else {
2064 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2065 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2066
2067 ctxt->eflags |= EFLG_ZF;
2068 }
2069 return X86EMUL_CONTINUE;
2070 }
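
/*
 * CMPXCHG8B semantics as implemented above, in sketch form (the locked
 * memory update itself is done by the caller's writeback):
 *
 *	if (old64 == EDX:EAX) {
 *		m64 = ECX:EBX;		ZF = 1;
 *	} else {
 *		EDX:EAX = old64;	ZF = 0;
 *	}
 *
 * ctxt->dst.orig_val64 is the value read from memory before execution;
 * the 16-byte CMPXCHG16B form is rejected as unhandleable here.
 */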
2071
2072 static int em_ret(struct x86_emulate_ctxt *ctxt)
2073 {
2074 int rc;
2075 unsigned long eip;
2076
2077 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2078 if (rc != X86EMUL_CONTINUE)
2079 return rc;
2080
2081 return assign_eip_near(ctxt, eip);
2082 }
2083
2084 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2085 {
2086 int rc;
2087 unsigned long eip, cs;
2088 u16 old_cs;
2089 int cpl = ctxt->ops->cpl(ctxt);
2090 struct desc_struct old_desc, new_desc;
2091 const struct x86_emulate_ops *ops = ctxt->ops;
2092
2093 if (ctxt->mode == X86EMUL_MODE_PROT64)
2094 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2095 VCPU_SREG_CS);
2096
2097 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2098 if (rc != X86EMUL_CONTINUE)
2099 return rc;
2100 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2101 if (rc != X86EMUL_CONTINUE)
2102 return rc;
2103 /* Outer-privilege level return is not implemented */
2104 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2105 return X86EMUL_UNHANDLEABLE;
2106 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
2107 &new_desc);
2108 if (rc != X86EMUL_CONTINUE)
2109 return rc;
2110 rc = assign_eip_far(ctxt, eip, new_desc.l);
2111 if (rc != X86EMUL_CONTINUE) {
2112 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2113 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2114 }
2115 return rc;
2116 }
2117
2118 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2119 {
2120 int rc;
2121
2122 rc = em_ret_far(ctxt);
2123 if (rc != X86EMUL_CONTINUE)
2124 return rc;
2125 rsp_increment(ctxt, ctxt->src.val);
2126 return X86EMUL_CONTINUE;
2127 }
2128
2129 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2130 {
2131 /* Save real source value, then compare EAX against destination. */
2132 ctxt->dst.orig_val = ctxt->dst.val;
2133 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2134 ctxt->src.orig_val = ctxt->src.val;
2135 ctxt->src.val = ctxt->dst.orig_val;
2136 fastop(ctxt, em_cmp);
2137
2138 if (ctxt->eflags & EFLG_ZF) {
2139 /* Success: write back to memory. */
2140 ctxt->dst.val = ctxt->src.orig_val;
2141 } else {
2142 /* Failure: write the value we saw to EAX. */
2143 ctxt->dst.type = OP_REG;
2144 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2145 ctxt->dst.val = ctxt->dst.orig_val;
2146 }
2147 return X86EMUL_CONTINUE;
2148 }
2149
2150 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2151 {
2152 int seg = ctxt->src2.val;
2153 unsigned short sel;
2154 int rc;
2155
2156 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2157
2158 rc = load_segment_descriptor(ctxt, sel, seg);
2159 if (rc != X86EMUL_CONTINUE)
2160 return rc;
2161
2162 ctxt->dst.val = ctxt->src.val;
2163 return rc;
2164 }
2165
2166 static void
2167 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2168 struct desc_struct *cs, struct desc_struct *ss)
2169 {
2170 cs->l = 0; /* will be adjusted later */
2171 set_desc_base(cs, 0); /* flat segment */
2172 cs->g = 1; /* 4kb granularity */
2173 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2174 cs->type = 0x0b; /* Read, Execute, Accessed */
2175 cs->s = 1;
2176 cs->dpl = 0; /* will be adjusted later */
2177 cs->p = 1;
2178 cs->d = 1;
2179 cs->avl = 0;
2180
2181 set_desc_base(ss, 0); /* flat segment */
2182 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2183 ss->g = 1; /* 4kb granularity */
2184 ss->s = 1;
2185 ss->type = 0x03; /* Read/Write, Accessed */
2186 ss->d = 1; /* 32bit stack segment */
2187 ss->dpl = 0;
2188 ss->p = 1;
2189 ss->l = 0;
2190 ss->avl = 0;
2191 }
2192
2193 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2194 {
2195 u32 eax, ebx, ecx, edx;
2196
2197 eax = ecx = 0;
2198 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2199 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2200 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2201 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2202 }
2203
2204 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2205 {
2206 const struct x86_emulate_ops *ops = ctxt->ops;
2207 u32 eax, ebx, ecx, edx;
2208
2209 /*
2210 * syscall is always enabled in long mode, so the vendor-specific
2211 * CPUID check below only matters for the other modes.
2212 */
2213 if (ctxt->mode == X86EMUL_MODE_PROT64)
2214 return true;
2215
2216 eax = 0x00000000;
2217 ecx = 0x00000000;
2218 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2219 /*
2220 * Intel ("GenuineIntel")
2221 * remark: Intel CPUs only support "syscall" in 64-bit long
2222 * mode, so a 32-bit compat application on a 64-bit guest
2223 * takes #UD. While the emulator could paper over this by
2224 * giving the AMD behaviour, real Intel CPUs don't, so stay
2225 * faithful and report syscall as unavailable.
2226 */
2227 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2228 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2229 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2230 return false;
2231
2232 /* AMD ("AuthenticAMD") */
2233 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2234 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2235 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2236 return true;
2237
2238 /* AMD ("AMDisbetter!") */
2239 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2240 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2241 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2242 return true;
2243
2244 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2245 return false;
2246 }
2247
2248 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2249 {
2250 const struct x86_emulate_ops *ops = ctxt->ops;
2251 struct desc_struct cs, ss;
2252 u64 msr_data;
2253 u16 cs_sel, ss_sel;
2254 u64 efer = 0;
2255
2256 /* syscall is not available in real mode */
2257 if (ctxt->mode == X86EMUL_MODE_REAL ||
2258 ctxt->mode == X86EMUL_MODE_VM86)
2259 return emulate_ud(ctxt);
2260
2261 if (!em_syscall_is_enabled(ctxt))
2262 return emulate_ud(ctxt);
2263
2264 ops->get_msr(ctxt, MSR_EFER, &efer);
2265 setup_syscalls_segments(ctxt, &cs, &ss);
2266
2267 if (!(efer & EFER_SCE))
2268 return emulate_ud(ctxt);
2269
2270 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2271 msr_data >>= 32;
2272 cs_sel = (u16)(msr_data & 0xfffc);
2273 ss_sel = (u16)(msr_data + 8);
2274
2275 if (efer & EFER_LMA) {
2276 cs.d = 0;
2277 cs.l = 1;
2278 }
2279 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2280 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2281
2282 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2283 if (efer & EFER_LMA) {
2284 #ifdef CONFIG_X86_64
2285 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2286
2287 ops->get_msr(ctxt,
2288 ctxt->mode == X86EMUL_MODE_PROT64 ?
2289 MSR_LSTAR : MSR_CSTAR, &msr_data);
2290 ctxt->_eip = msr_data;
2291
2292 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2293 ctxt->eflags &= ~msr_data;
2294 #endif
2295 } else {
2296 /* legacy mode */
2297 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2298 ctxt->_eip = (u32)msr_data;
2299
2300 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2301 }
2302
2303 return X86EMUL_CONTINUE;
2304 }
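
/*
 * Reference note on the MSR_STAR layout consumed above (per the AMD
 * SYSCALL definition): bits 47:32 hold the kernel CS selector used on
 * entry, and SS is architecturally that selector + 8:
 *
 *	cs_sel = (u16)((star >> 32) & 0xfffc);	// RPL forced to 0
 *	ss_sel = (u16)((star >> 32) + 8);
 *
 * In long mode the new RIP comes from MSR_LSTAR (MSR_CSTAR for a
 * compat-mode caller) and the RFLAGS bits named in MSR_SYSCALL_MASK
 * are cleared; in legacy mode EIP comes from the low 32 bits of
 * MSR_STAR.
 */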
2305
2306 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2307 {
2308 const struct x86_emulate_ops *ops = ctxt->ops;
2309 struct desc_struct cs, ss;
2310 u64 msr_data;
2311 u16 cs_sel, ss_sel;
2312 u64 efer = 0;
2313
2314 ops->get_msr(ctxt, MSR_EFER, &efer);
2315 /* inject #GP if in real mode */
2316 if (ctxt->mode == X86EMUL_MODE_REAL)
2317 return emulate_gp(ctxt, 0);
2318
2319 /*
2320 * Not recognized on AMD in compat mode (but is recognized in legacy
2321 * mode).
2322 */
2323 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2324 && !vendor_intel(ctxt))
2325 return emulate_ud(ctxt);
2326
2327 /* XXX sysenter/sysexit have not been tested in 64-bit mode,
2328 * so inject a #UD there.
2329 */
2330 if (ctxt->mode == X86EMUL_MODE_PROT64)
2331 return emulate_ud(ctxt);
2332
2333 setup_syscalls_segments(ctxt, &cs, &ss);
2334
2335 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2336 switch (ctxt->mode) {
2337 case X86EMUL_MODE_PROT32:
2338 if ((msr_data & 0xfffc) == 0x0)
2339 return emulate_gp(ctxt, 0);
2340 break;
2341 case X86EMUL_MODE_PROT64:
2342 if (msr_data == 0x0)
2343 return emulate_gp(ctxt, 0);
2344 break;
2345 default:
2346 break;
2347 }
2348
2349 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2350 cs_sel = (u16)msr_data;
2351 cs_sel &= ~SELECTOR_RPL_MASK;
2352 ss_sel = cs_sel + 8;
2353 ss_sel &= ~SELECTOR_RPL_MASK;
2354 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2355 cs.d = 0;
2356 cs.l = 1;
2357 }
2358
2359 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2360 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2361
2362 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2363 ctxt->_eip = msr_data;
2364
2365 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2366 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2367
2368 return X86EMUL_CONTINUE;
2369 }
2370
2371 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2372 {
2373 const struct x86_emulate_ops *ops = ctxt->ops;
2374 struct desc_struct cs, ss;
2375 u64 msr_data, rcx, rdx;
2376 int usermode;
2377 u16 cs_sel = 0, ss_sel = 0;
2378
2379 /* inject #GP if in real mode or Virtual 8086 mode */
2380 if (ctxt->mode == X86EMUL_MODE_REAL ||
2381 ctxt->mode == X86EMUL_MODE_VM86)
2382 return emulate_gp(ctxt, 0);
2383
2384 setup_syscalls_segments(ctxt, &cs, &ss);
2385
2386 if ((ctxt->rex_prefix & 0x8) != 0x0) /* REX.W selects 64-bit SYSEXIT */
2387 usermode = X86EMUL_MODE_PROT64;
2388 else
2389 usermode = X86EMUL_MODE_PROT32;
2390
2391 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2392 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2393
2394 cs.dpl = 3;
2395 ss.dpl = 3;
2396 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2397 switch (usermode) {
2398 case X86EMUL_MODE_PROT32:
2399 cs_sel = (u16)(msr_data + 16);
2400 if ((msr_data & 0xfffc) == 0x0)
2401 return emulate_gp(ctxt, 0);
2402 ss_sel = (u16)(msr_data + 24);
2403 break;
2404 case X86EMUL_MODE_PROT64:
2405 cs_sel = (u16)(msr_data + 32);
2406 if (msr_data == 0x0)
2407 return emulate_gp(ctxt, 0);
2408 ss_sel = cs_sel + 8;
2409 cs.d = 0;
2410 cs.l = 1;
2411 if (is_noncanonical_address(rcx) ||
2412 is_noncanonical_address(rdx))
2413 return emulate_gp(ctxt, 0);
2414 break;
2415 }
2416 cs_sel |= SELECTOR_RPL_MASK;
2417 ss_sel |= SELECTOR_RPL_MASK;
2418
2419 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2420 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2421
2422 ctxt->_eip = rdx;
2423 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2424
2425 return X86EMUL_CONTINUE;
2426 }
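
/*
 * Selector arithmetic used above, per the SYSEXIT definition: the user
 * segments are derived from the kernel CS in MSR_IA32_SYSENTER_CS at
 * fixed offsets, with the RPL then forced to 3:
 *
 *	32-bit exit:	CS = sysenter_cs + 16,	SS = sysenter_cs + 24
 *	64-bit exit:	CS = sysenter_cs + 32,	SS = CS + 8
 *
 * The 64-bit flavour is selected by REX.W, and it additionally demands
 * canonical RCX/RDX since they become the new RSP and RIP.
 */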
2427
2428 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2429 {
2430 int iopl;
2431 if (ctxt->mode == X86EMUL_MODE_REAL)
2432 return false;
2433 if (ctxt->mode == X86EMUL_MODE_VM86)
2434 return true;
2435 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2436 return ctxt->ops->cpl(ctxt) > iopl;
2437 }
2438
2439 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2440 u16 port, u16 len)
2441 {
2442 const struct x86_emulate_ops *ops = ctxt->ops;
2443 struct desc_struct tr_seg;
2444 u32 base3;
2445 int r;
2446 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2447 unsigned mask = (1 << len) - 1;
2448 unsigned long base;
2449
2450 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2451 if (!tr_seg.p)
2452 return false;
2453 if (desc_limit_scaled(&tr_seg) < 103) /* I/O map base lives at offset 102 */
2454 return false;
2455 base = get_desc_base(&tr_seg);
2456 #ifdef CONFIG_X86_64
2457 base |= ((u64)base3) << 32;
2458 #endif
2459 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2460 if (r != X86EMUL_CONTINUE)
2461 return false;
2462 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2463 return false;
2464 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2465 if (r != X86EMUL_CONTINUE)
2466 return false;
2467 if ((perm >> bit_idx) & mask)
2468 return false;
2469 return true;
2470 }
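
/*
 * Worked example of the TSS I/O bitmap walk above: for a 2-byte access
 * to port 0x3f8 the relevant permission bits are located by
 *
 *	byte offset = 0x3f8 / 8 = 0x7f		(into the bitmap)
 *	bit_idx     = 0x3f8 & 7 = 0
 *	mask        = (1 << 2) - 1 = 0x3	(one bit per I/O byte)
 *
 * Two bytes of the bitmap are read so that an access straddling a byte
 * boundary still sees all of its bits; any set bit in
 * (perm >> bit_idx) & mask denies the access.  The limit checks ensure
 * both the 16-bit I/O map base at TSS offset 102 and the bitmap bytes
 * themselves lie inside the TSS.
 */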
2471
2472 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2473 u16 port, u16 len)
2474 {
2475 if (ctxt->perm_ok)
2476 return true;
2477
2478 if (emulator_bad_iopl(ctxt))
2479 if (!emulator_io_port_access_allowed(ctxt, port, len))
2480 return false;
2481
2482 ctxt->perm_ok = true;
2483
2484 return true;
2485 }
2486
2487 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2488 struct tss_segment_16 *tss)
2489 {
2490 tss->ip = ctxt->_eip;
2491 tss->flag = ctxt->eflags;
2492 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2493 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2494 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2495 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2496 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2497 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2498 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2499 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2500
2501 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2502 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2503 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2504 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2505 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2506 }
2507
2508 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2509 struct tss_segment_16 *tss)
2510 {
2511 int ret;
2512 u8 cpl;
2513
2514 ctxt->_eip = tss->ip;
2515 ctxt->eflags = tss->flag | 2;
2516 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2517 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2518 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2519 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2520 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2521 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2522 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2523 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2524
2525 /*
2526 * SDM says that segment selectors are loaded before segment
2527 * descriptors
2528 */
2529 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2530 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2531 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2532 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2533 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2534
2535 cpl = tss->cs & 3;
2536
2537 /*
2538 * Now load the segment descriptors. If a fault happens at this
2539 * stage, it is handled in the context of the new task.
2540 */
2541 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2542 true, NULL);
2543 if (ret != X86EMUL_CONTINUE)
2544 return ret;
2545 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2546 true, NULL);
2547 if (ret != X86EMUL_CONTINUE)
2548 return ret;
2549 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2550 true, NULL);
2551 if (ret != X86EMUL_CONTINUE)
2552 return ret;
2553 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2554 true, NULL);
2555 if (ret != X86EMUL_CONTINUE)
2556 return ret;
2557 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2558 true, NULL);
2559 if (ret != X86EMUL_CONTINUE)
2560 return ret;
2561
2562 return X86EMUL_CONTINUE;
2563 }
2564
2565 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2566 u16 tss_selector, u16 old_tss_sel,
2567 ulong old_tss_base, struct desc_struct *new_desc)
2568 {
2569 const struct x86_emulate_ops *ops = ctxt->ops;
2570 struct tss_segment_16 tss_seg;
2571 int ret;
2572 u32 new_tss_base = get_desc_base(new_desc);
2573
2574 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2575 &ctxt->exception);
2576 if (ret != X86EMUL_CONTINUE)
2577 /* FIXME: need to provide precise fault address */
2578 return ret;
2579
2580 save_state_to_tss16(ctxt, &tss_seg);
2581
2582 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2583 &ctxt->exception);
2584 if (ret != X86EMUL_CONTINUE)
2585 /* FIXME: need to provide precise fault address */
2586 return ret;
2587
2588 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2589 &ctxt->exception);
2590 if (ret != X86EMUL_CONTINUE)
2591 /* FIXME: need to provide precise fault address */
2592 return ret;
2593
2594 if (old_tss_sel != 0xffff) {
2595 tss_seg.prev_task_link = old_tss_sel;
2596
2597 ret = ops->write_std(ctxt, new_tss_base,
2598 &tss_seg.prev_task_link,
2599 sizeof tss_seg.prev_task_link,
2600 &ctxt->exception);
2601 if (ret != X86EMUL_CONTINUE)
2602 /* FIXME: need to provide precise fault address */
2603 return ret;
2604 }
2605
2606 return load_state_from_tss16(ctxt, &tss_seg);
2607 }
2608
2609 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2610 struct tss_segment_32 *tss)
2611 {
2612 /* CR3 and the LDT selector are intentionally not saved */
2613 tss->eip = ctxt->_eip;
2614 tss->eflags = ctxt->eflags;
2615 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2616 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2617 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2618 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2619 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2620 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2621 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2622 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2623
2624 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2625 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2626 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2627 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2628 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2629 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2630 }
2631
2632 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2633 struct tss_segment_32 *tss)
2634 {
2635 int ret;
2636 u8 cpl;
2637
2638 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2639 return emulate_gp(ctxt, 0);
2640 ctxt->_eip = tss->eip;
2641 ctxt->eflags = tss->eflags | 2;
2642
2643 /* General purpose registers */
2644 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2645 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2646 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2647 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2648 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2649 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2650 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2651 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2652
2653 /*
2654 * SDM says that segment selectors are loaded before segment
2655 * descriptors. This is important because CPL checks will
2656 * use CS.RPL.
2657 */
2658 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2659 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2660 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2661 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2662 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2663 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2664 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2665
2666 /*
2667 * If we're switching between Protected Mode and VM86, we need to make
2668 * sure to update the mode before loading the segment descriptors so
2669 * that the selectors are interpreted correctly.
2670 */
2671 if (ctxt->eflags & X86_EFLAGS_VM) {
2672 ctxt->mode = X86EMUL_MODE_VM86;
2673 cpl = 3;
2674 } else {
2675 ctxt->mode = X86EMUL_MODE_PROT32;
2676 cpl = tss->cs & 3;
2677 }
2678
2679 /*
2680 * Now load the segment descriptors. If a fault happens at this
2681 * stage, it is handled in the context of the new task.
2682 */
2683 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2684 cpl, true, NULL);
2685 if (ret != X86EMUL_CONTINUE)
2686 return ret;
2687 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2688 true, NULL);
2689 if (ret != X86EMUL_CONTINUE)
2690 return ret;
2691 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2692 true, NULL);
2693 if (ret != X86EMUL_CONTINUE)
2694 return ret;
2695 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2696 true, NULL);
2697 if (ret != X86EMUL_CONTINUE)
2698 return ret;
2699 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2700 true, NULL);
2701 if (ret != X86EMUL_CONTINUE)
2702 return ret;
2703 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2704 true, NULL);
2705 if (ret != X86EMUL_CONTINUE)
2706 return ret;
2707 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2708 true, NULL);
2709 if (ret != X86EMUL_CONTINUE)
2710 return ret;
2711
2712 return X86EMUL_CONTINUE;
2713 }
2714
2715 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2716 u16 tss_selector, u16 old_tss_sel,
2717 ulong old_tss_base, struct desc_struct *new_desc)
2718 {
2719 const struct x86_emulate_ops *ops = ctxt->ops;
2720 struct tss_segment_32 tss_seg;
2721 int ret;
2722 u32 new_tss_base = get_desc_base(new_desc);
2723 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2724 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2725
2726 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2727 &ctxt->exception);
2728 if (ret != X86EMUL_CONTINUE)
2729 /* FIXME: need to provide precise fault address */
2730 return ret;
2731
2732 save_state_to_tss32(ctxt, &tss_seg);
2733
2734 /* Only GP registers and segment selectors are saved */
2735 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2736 ldt_sel_offset - eip_offset, &ctxt->exception);
2737 if (ret != X86EMUL_CONTINUE)
2738 /* FIXME: need to provide precise fault address */
2739 return ret;
2740
2741 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2742 &ctxt->exception);
2743 if (ret != X86EMUL_CONTINUE)
2744 /* FIXME: need to provide precise fault address */
2745 return ret;
2746
2747 if (old_tss_sel != 0xffff) {
2748 tss_seg.prev_task_link = old_tss_sel;
2749
2750 ret = ops->write_std(ctxt, new_tss_base,
2751 &tss_seg.prev_task_link,
2752 sizeof tss_seg.prev_task_link,
2753 &ctxt->exception);
2754 if (ret != X86EMUL_CONTINUE)
2755 /* FIXME: need to provide precise fault address */
2756 return ret;
2757 }
2758
2759 return load_state_from_tss32(ctxt, &tss_seg);
2760 }
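
/*
 * Note on the partial write-back above, assuming the standard 32-bit
 * TSS layout mirrored by struct tss_segment_32 (eip at offset 0x20,
 * ldt_selector at offset 0x60): only the 64 bytes from eip up to, but
 * not including, ldt_selector are written back to the old TSS, i.e.
 * EIP, EFLAGS, the eight GPRs and the six segment selectors.  The
 * outgoing task's CR3 and LDT selector are deliberately left alone,
 * matching what save_state_to_tss32() fills in.
 */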
2761
2762 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2763 u16 tss_selector, int idt_index, int reason,
2764 bool has_error_code, u32 error_code)
2765 {
2766 const struct x86_emulate_ops *ops = ctxt->ops;
2767 struct desc_struct curr_tss_desc, next_tss_desc;
2768 int ret;
2769 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2770 ulong old_tss_base =
2771 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2772 u32 desc_limit;
2773 ulong desc_addr;
2774
2775 /* FIXME: old_tss_base == ~0 ? */
2776
2777 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2778 if (ret != X86EMUL_CONTINUE)
2779 return ret;
2780 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2781 if (ret != X86EMUL_CONTINUE)
2782 return ret;
2783
2784 /* FIXME: check that next_tss_desc is tss */
2785
2786 /*
2787 * Check privileges. The three cases are task switch caused by...
2788 *
2789 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2790 * 2. Exception/IRQ/iret: No check is performed
2791 * 3. jmp/call to TSS: Check against DPL of the TSS
2792 */
2793 if (reason == TASK_SWITCH_GATE) {
2794 if (idt_index != -1) {
2795 /* Software interrupts */
2796 struct desc_struct task_gate_desc;
2797 int dpl;
2798
2799 ret = read_interrupt_descriptor(ctxt, idt_index,
2800 &task_gate_desc);
2801 if (ret != X86EMUL_CONTINUE)
2802 return ret;
2803
2804 dpl = task_gate_desc.dpl;
2805 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2806 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2807 }
2808 } else if (reason != TASK_SWITCH_IRET) {
2809 int dpl = next_tss_desc.dpl;
2810 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2811 return emulate_gp(ctxt, tss_selector);
2812 }
2813
2814
2815 desc_limit = desc_limit_scaled(&next_tss_desc);
2816 if (!next_tss_desc.p ||
2817 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2818 desc_limit < 0x2b)) {
2819 return emulate_ts(ctxt, tss_selector & 0xfffc);
2820 }
2821
2822 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2823 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2824 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2825 }
2826
2827 if (reason == TASK_SWITCH_IRET)
2828 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2829
2830 /* Set the back link to the previous task only if the NT bit is set
2831 in eflags; note that old_tss_sel is not used after this point. */
2832 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2833 old_tss_sel = 0xffff;
2834
2835 if (next_tss_desc.type & 8)
2836 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2837 old_tss_base, &next_tss_desc);
2838 else
2839 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2840 old_tss_base, &next_tss_desc);
2841 if (ret != X86EMUL_CONTINUE)
2842 return ret;
2843
2844 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2845 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2846
2847 if (reason != TASK_SWITCH_IRET) {
2848 next_tss_desc.type |= (1 << 1); /* set busy flag */
2849 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2850 }
2851
2852 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2853 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2854
2855 if (has_error_code) {
2856 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2857 ctxt->lock_prefix = 0;
2858 ctxt->src.val = (unsigned long) error_code;
2859 ret = em_push(ctxt);
2860 }
2861
2862 return ret;
2863 }
2864
2865 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2866 u16 tss_selector, int idt_index, int reason,
2867 bool has_error_code, u32 error_code)
2868 {
2869 int rc;
2870
2871 invalidate_registers(ctxt);
2872 ctxt->_eip = ctxt->eip;
2873 ctxt->dst.type = OP_NONE;
2874
2875 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2876 has_error_code, error_code);
2877
2878 if (rc == X86EMUL_CONTINUE) {
2879 ctxt->eip = ctxt->_eip;
2880 writeback_registers(ctxt);
2881 }
2882
2883 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2884 }
2885
2886 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2887 struct operand *op)
2888 {
2889 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2890
2891 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2892 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2893 }
2894
2895 static int em_das(struct x86_emulate_ctxt *ctxt)
2896 {
2897 u8 al, old_al;
2898 bool af, cf, old_cf;
2899
2900 cf = ctxt->eflags & X86_EFLAGS_CF;
2901 al = ctxt->dst.val;
2902
2903 old_al = al;
2904 old_cf = cf;
2905 cf = false;
2906 af = ctxt->eflags & X86_EFLAGS_AF;
2907 if ((al & 0x0f) > 9 || af) {
2908 al -= 6;
2909 cf = old_cf | (al >= 250); /* al wrapped below 0: the -= 6 borrowed */
2910 af = true;
2911 } else {
2912 af = false;
2913 }
2914 if (old_al > 0x99 || old_cf) {
2915 al -= 0x60;
2916 cf = true;
2917 }
2918
2919 ctxt->dst.val = al;
2920 /* Set PF, ZF, SF */
2921 ctxt->src.type = OP_IMM;
2922 ctxt->src.val = 0;
2923 ctxt->src.bytes = 1;
2924 fastop(ctxt, em_or);
2925 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2926 if (cf)
2927 ctxt->eflags |= X86_EFLAGS_CF;
2928 if (af)
2929 ctxt->eflags |= X86_EFLAGS_AF;
2930 return X86EMUL_CONTINUE;
2931 }
2932
2933 static int em_aam(struct x86_emulate_ctxt *ctxt)
2934 {
2935 u8 al, ah;
2936
2937 if (ctxt->src.val == 0)
2938 return emulate_de(ctxt);
2939
2940 al = ctxt->dst.val & 0xff;
2941 ah = al / ctxt->src.val;
2942 al %= ctxt->src.val;
2943
2944 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2945
2946 /* Set PF, ZF, SF */
2947 ctxt->src.type = OP_IMM;
2948 ctxt->src.val = 0;
2949 ctxt->src.bytes = 1;
2950 fastop(ctxt, em_or);
2951
2952 return X86EMUL_CONTINUE;
2953 }
2954
2955 static int em_aad(struct x86_emulate_ctxt *ctxt)
2956 {
2957 u8 al = ctxt->dst.val & 0xff;
2958 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2959
2960 al = (al + (ah * ctxt->src.val)) & 0xff;
2961
2962 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2963
2964 /* Set PF, ZF, SF */
2965 ctxt->src.type = OP_IMM;
2966 ctxt->src.val = 0;
2967 ctxt->src.bytes = 1;
2968 fastop(ctxt, em_or);
2969
2970 return X86EMUL_CONTINUE;
2971 }
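
/*
 * Worked example for the two ASCII-adjust helpers above, using the
 * customary base of 10 (the immediate byte): AAM on AL = 123 yields
 * AH = 123 / 10 = 12 and AL = 123 % 10 = 3; AAD then reverses it,
 * AL = 3 + 12 * 10 = 123 with AH cleared by the low-word masking of
 * the destination.  A base of 0 makes AAM fault with #DE, which is
 * the emulate_de() path at the top of em_aam().
 */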
2972
2973 static int em_call(struct x86_emulate_ctxt *ctxt)
2974 {
2975 int rc;
2976 long rel = ctxt->src.val;
2977
2978 ctxt->src.val = (unsigned long)ctxt->_eip;
2979 rc = jmp_rel(ctxt, rel);
2980 if (rc != X86EMUL_CONTINUE)
2981 return rc;
2982 return em_push(ctxt);
2983 }
2984
2985 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2986 {
2987 u16 sel, old_cs;
2988 ulong old_eip;
2989 int rc;
2990 struct desc_struct old_desc, new_desc;
2991 const struct x86_emulate_ops *ops = ctxt->ops;
2992 int cpl = ctxt->ops->cpl(ctxt);
2993
2994 old_eip = ctxt->_eip;
2995 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
2996
2997 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2998 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2999 &new_desc);
3000 if (rc != X86EMUL_CONTINUE)
3001 return rc;
3002
3003 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3004 if (rc != X86EMUL_CONTINUE)
3005 goto fail;
3006
3007 ctxt->src.val = old_cs;
3008 rc = em_push(ctxt);
3009 if (rc != X86EMUL_CONTINUE)
3010 goto fail;
3011
3012 ctxt->src.val = old_eip;
3013 rc = em_push(ctxt);
3014 /* If we failed, we tainted the memory, but at the very least we should
3015 restore cs */
3016 if (rc != X86EMUL_CONTINUE)
3017 goto fail;
3018 return rc;
3019 fail:
3020 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3021 return rc;
3022
3023 }
3024
3025 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3026 {
3027 int rc;
3028 unsigned long eip;
3029
3030 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3031 if (rc != X86EMUL_CONTINUE)
3032 return rc;
3033 rc = assign_eip_near(ctxt, eip);
3034 if (rc != X86EMUL_CONTINUE)
3035 return rc;
3036 rsp_increment(ctxt, ctxt->src.val);
3037 return X86EMUL_CONTINUE;
3038 }
3039
3040 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3041 {
3042 /* Write back the register source. */
3043 ctxt->src.val = ctxt->dst.val;
3044 write_register_operand(&ctxt->src);
3045
3046 /* Write back the memory destination with implicit LOCK prefix. */
3047 ctxt->dst.val = ctxt->src.orig_val;
3048 ctxt->lock_prefix = 1;
3049 return X86EMUL_CONTINUE;
3050 }
3051
3052 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3053 {
3054 ctxt->dst.val = ctxt->src2.val;
3055 return fastop(ctxt, em_imul);
3056 }
3057
3058 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3059 {
3060 ctxt->dst.type = OP_REG;
3061 ctxt->dst.bytes = ctxt->src.bytes;
3062 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3063 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3064
3065 return X86EMUL_CONTINUE;
3066 }
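
/*
 * The expression in em_cwd() is a branchless sign extension: shifting
 * the source right by (bytes * 8 - 1) isolates its sign bit, so for a
 * 16-bit source
 *
 *	src = 0x8000:  src >> 15 = 1,  ~(1 - 1) = ~0  ->  DX = 0xffff
 *	src = 0x7fff:  src >> 15 = 0,  ~(0 - 1) =  0  ->  DX = 0x0000
 *
 * which is exactly the CWD/CDQ/CQO behaviour of filling rDX with
 * copies of rAX's sign bit.
 */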
3067
3068 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3069 {
3070 u64 tsc = 0;
3071
3072 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3073 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3074 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3075 return X86EMUL_CONTINUE;
3076 }
3077
3078 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3079 {
3080 u64 pmc;
3081
3082 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3083 return emulate_gp(ctxt, 0);
3084 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3085 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3086 return X86EMUL_CONTINUE;
3087 }
3088
3089 static int em_mov(struct x86_emulate_ctxt *ctxt)
3090 {
3091 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3092 return X86EMUL_CONTINUE;
3093 }
3094
3095 #define FFL(x) bit(X86_FEATURE_##x)
3096
3097 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3098 {
3099 u32 ebx, ecx, edx, eax = 1;
3100 u16 tmp;
3101
3102 /*
3103 * Check MOVBE is set in the guest-visible CPUID leaf.
3104 */
3105 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3106 if (!(ecx & FFL(MOVBE)))
3107 return emulate_ud(ctxt);
3108
3109 switch (ctxt->op_bytes) {
3110 case 2:
3111 /*
3112 * From MOVBE definition: "...When the operand size is 16 bits,
3113 * the upper word of the destination register remains unchanged
3114 * ..."
3115 *
3116 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3117 * rules, so we have to do the operation almost by hand.
3118 */
3119 tmp = (u16)ctxt->src.val;
3120 ctxt->dst.val &= ~0xffffUL;
3121 ctxt->dst.val |= (unsigned long)swab16(tmp);
3122 break;
3123 case 4:
3124 ctxt->dst.val = swab32((u32)ctxt->src.val);
3125 break;
3126 case 8:
3127 ctxt->dst.val = swab64(ctxt->src.val);
3128 break;
3129 default:
3130 BUG();
3131 }
3132 return X86EMUL_CONTINUE;
3133 }
3134
3135 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3136 {
3137 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3138 return emulate_gp(ctxt, 0);
3139
3140 /* Disable writeback. */
3141 ctxt->dst.type = OP_NONE;
3142 return X86EMUL_CONTINUE;
3143 }
3144
3145 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3146 {
3147 unsigned long val;
3148
3149 if (ctxt->mode == X86EMUL_MODE_PROT64)
3150 val = ctxt->src.val & ~0ULL;
3151 else
3152 val = ctxt->src.val & ~0U;
3153
3154 /* #UD condition is already handled. */
3155 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3156 return emulate_gp(ctxt, 0);
3157
3158 /* Disable writeback. */
3159 ctxt->dst.type = OP_NONE;
3160 return X86EMUL_CONTINUE;
3161 }
3162
3163 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3164 {
3165 u64 msr_data;
3166
3167 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3168 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3169 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3170 return emulate_gp(ctxt, 0);
3171
3172 return X86EMUL_CONTINUE;
3173 }
3174
3175 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3176 {
3177 u64 msr_data;
3178
3179 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3180 return emulate_gp(ctxt, 0);
3181
3182 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3183 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3184 return X86EMUL_CONTINUE;
3185 }
3186
3187 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3188 {
3189 if (ctxt->modrm_reg > VCPU_SREG_GS)
3190 return emulate_ud(ctxt);
3191
3192 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3193 return X86EMUL_CONTINUE;
3194 }
3195
3196 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3197 {
3198 u16 sel = ctxt->src.val;
3199
3200 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3201 return emulate_ud(ctxt);
3202
3203 if (ctxt->modrm_reg == VCPU_SREG_SS)
3204 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3205
3206 /* Disable writeback. */
3207 ctxt->dst.type = OP_NONE;
3208 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3209 }
3210
3211 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3212 {
3213 u16 sel = ctxt->src.val;
3214
3215 /* Disable writeback. */
3216 ctxt->dst.type = OP_NONE;
3217 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3218 }
3219
3220 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3221 {
3222 u16 sel = ctxt->src.val;
3223
3224 /* Disable writeback. */
3225 ctxt->dst.type = OP_NONE;
3226 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3227 }
3228
3229 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3230 {
3231 int rc;
3232 ulong linear;
3233
3234 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3235 if (rc == X86EMUL_CONTINUE)
3236 ctxt->ops->invlpg(ctxt, linear);
3237 /* Disable writeback. */
3238 ctxt->dst.type = OP_NONE;
3239 return X86EMUL_CONTINUE;
3240 }
3241
3242 static int em_clts(struct x86_emulate_ctxt *ctxt)
3243 {
3244 ulong cr0;
3245
3246 cr0 = ctxt->ops->get_cr(ctxt, 0);
3247 cr0 &= ~X86_CR0_TS;
3248 ctxt->ops->set_cr(ctxt, 0, cr0);
3249 return X86EMUL_CONTINUE;
3250 }
3251
3252 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3253 {
3254 int rc = ctxt->ops->fix_hypercall(ctxt);
3255
3256 if (rc != X86EMUL_CONTINUE)
3257 return rc;
3258
3259 /* Let the processor re-execute the fixed hypercall */
3260 ctxt->_eip = ctxt->eip;
3261 /* Disable writeback. */
3262 ctxt->dst.type = OP_NONE;
3263 return X86EMUL_CONTINUE;
3264 }
3265
3266 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3267 void (*get)(struct x86_emulate_ctxt *ctxt,
3268 struct desc_ptr *ptr))
3269 {
3270 struct desc_ptr desc_ptr;
3271
3272 if (ctxt->mode == X86EMUL_MODE_PROT64)
3273 ctxt->op_bytes = 8;
3274 get(ctxt, &desc_ptr);
3275 if (ctxt->op_bytes == 2) {
3276 ctxt->op_bytes = 4;
3277 desc_ptr.address &= 0x00ffffff;
3278 }
3279 /* Disable writeback. */
3280 ctxt->dst.type = OP_NONE;
3281 return segmented_write(ctxt, ctxt->dst.addr.mem,
3282 &desc_ptr, 2 + ctxt->op_bytes);
3283 }
3284
3285 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3286 {
3287 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3288 }
3289
3290 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3291 {
3292 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3293 }
3294
3295 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3296 {
3297 struct desc_ptr desc_ptr;
3298 int rc;
3299
3300 if (ctxt->mode == X86EMUL_MODE_PROT64)
3301 ctxt->op_bytes = 8;
3302 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3303 &desc_ptr.size, &desc_ptr.address,
3304 ctxt->op_bytes);
3305 if (rc != X86EMUL_CONTINUE)
3306 return rc;
3307 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3308 /* Disable writeback. */
3309 ctxt->dst.type = OP_NONE;
3310 return X86EMUL_CONTINUE;
3311 }
3312
3313 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3314 {
3315 int rc;
3316
3317 rc = ctxt->ops->fix_hypercall(ctxt);
3318
3319 /* Disable writeback. */
3320 ctxt->dst.type = OP_NONE;
3321 return rc;
3322 }
3323
3324 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3325 {
3326 struct desc_ptr desc_ptr;
3327 int rc;
3328
3329 if (ctxt->mode == X86EMUL_MODE_PROT64)
3330 ctxt->op_bytes = 8;
3331 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3332 &desc_ptr.size, &desc_ptr.address,
3333 ctxt->op_bytes);
3334 if (rc != X86EMUL_CONTINUE)
3335 return rc;
3336 ctxt->ops->set_idt(ctxt, &desc_ptr);
3337 /* Disable writeback. */
3338 ctxt->dst.type = OP_NONE;
3339 return X86EMUL_CONTINUE;
3340 }
3341
3342 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3343 {
3344 if (ctxt->dst.type == OP_MEM)
3345 ctxt->dst.bytes = 2;
3346 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3347 return X86EMUL_CONTINUE;
3348 }
3349
3350 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3351 {
3352 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3353 | (ctxt->src.val & 0x0f));
3354 ctxt->dst.type = OP_NONE;
3355 return X86EMUL_CONTINUE;
3356 }
3357
3358 static int em_loop(struct x86_emulate_ctxt *ctxt)
3359 {
3360 int rc = X86EMUL_CONTINUE;
3361
3362 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3363 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3364 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3365 rc = jmp_rel(ctxt, ctxt->src.val);
3366
3367 return rc;
3368 }
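
/*
 * Decoding trick in em_loop() above: the opcodes are 0xe0 (LOOPNE),
 * 0xe1 (LOOPE) and 0xe2 (LOOP).  Plain LOOP tests only rCX, while for
 * the other two (ctxt->b ^ 0x5) maps 0xe0 to condition nibble 5
 * ("not equal", ZF clear) and 0xe1 to nibble 4 ("equal", ZF set), so
 * the ZF requirement is evaluated by the same test_cc() helper that
 * services the Jcc instructions.
 */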
3369
3370 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3371 {
3372 int rc = X86EMUL_CONTINUE;
3373
3374 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3375 rc = jmp_rel(ctxt, ctxt->src.val);
3376
3377 return rc;
3378 }
3379
3380 static int em_in(struct x86_emulate_ctxt *ctxt)
3381 {
3382 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3383 &ctxt->dst.val))
3384 return X86EMUL_IO_NEEDED;
3385
3386 return X86EMUL_CONTINUE;
3387 }
3388
3389 static int em_out(struct x86_emulate_ctxt *ctxt)
3390 {
3391 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3392 &ctxt->src.val, 1);
3393 /* Disable writeback. */
3394 ctxt->dst.type = OP_NONE;
3395 return X86EMUL_CONTINUE;
3396 }
3397
3398 static int em_cli(struct x86_emulate_ctxt *ctxt)
3399 {
3400 if (emulator_bad_iopl(ctxt))
3401 return emulate_gp(ctxt, 0);
3402
3403 ctxt->eflags &= ~X86_EFLAGS_IF;
3404 return X86EMUL_CONTINUE;
3405 }
3406
3407 static int em_sti(struct x86_emulate_ctxt *ctxt)
3408 {
3409 if (emulator_bad_iopl(ctxt))
3410 return emulate_gp(ctxt, 0);
3411
3412 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3413 ctxt->eflags |= X86_EFLAGS_IF;
3414 return X86EMUL_CONTINUE;
3415 }
3416
3417 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3418 {
3419 u32 eax, ebx, ecx, edx;
3420
3421 eax = reg_read(ctxt, VCPU_REGS_RAX);
3422 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3423 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3424 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3425 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3426 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3427 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3428 return X86EMUL_CONTINUE;
3429 }
3430
3431 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3432 {
3433 u32 flags;
3434
3435 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3436 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3437
3438 ctxt->eflags &= ~0xffUL;
3439 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3440 return X86EMUL_CONTINUE;
3441 }
3442
3443 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3444 {
3445 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3446 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3447 return X86EMUL_CONTINUE;
3448 }
3449
3450 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3451 {
3452 switch (ctxt->op_bytes) {
3453 #ifdef CONFIG_X86_64
3454 case 8:
3455 asm("bswap %0" : "+r"(ctxt->dst.val));
3456 break;
3457 #endif
3458 default:
3459 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3460 break;
3461 }
3462 return X86EMUL_CONTINUE;
3463 }
3464
3465 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3466 {
3467 /* emulate clflush as a (safe) nop, regardless of whether cpuid advertises it */
3468 return X86EMUL_CONTINUE;
3469 }
3470
3471 static bool valid_cr(int nr)
3472 {
3473 switch (nr) {
3474 case 0:
3475 case 2 ... 4:
3476 case 8:
3477 return true;
3478 default:
3479 return false;
3480 }
3481 }
3482
3483 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3484 {
3485 if (!valid_cr(ctxt->modrm_reg))
3486 return emulate_ud(ctxt);
3487
3488 return X86EMUL_CONTINUE;
3489 }
3490
3491 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3492 {
3493 u64 new_val = ctxt->src.val64;
3494 int cr = ctxt->modrm_reg;
3495 u64 efer = 0;
3496
3497 static u64 cr_reserved_bits[] = {
3498 0xffffffff00000000ULL,
3499 0, 0, 0, /* CR3 checked later */
3500 CR4_RESERVED_BITS,
3501 0, 0, 0,
3502 CR8_RESERVED_BITS,
3503 };
3504
3505 if (!valid_cr(cr))
3506 return emulate_ud(ctxt);
3507
3508 if (new_val & cr_reserved_bits[cr])
3509 return emulate_gp(ctxt, 0);
3510
3511 switch (cr) {
3512 case 0: {
3513 u64 cr4;
3514 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3515 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3516 return emulate_gp(ctxt, 0);
3517
3518 cr4 = ctxt->ops->get_cr(ctxt, 4);
3519 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3520
3521 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3522 !(cr4 & X86_CR4_PAE))
3523 return emulate_gp(ctxt, 0);
3524
3525 break;
3526 }
3527 case 3: {
3528 u64 rsvd = 0;
3529
3530 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3531 if (efer & EFER_LMA)
3532 rsvd = CR3_L_MODE_RESERVED_BITS;
3533
3534 if (new_val & rsvd)
3535 return emulate_gp(ctxt, 0);
3536
3537 break;
3538 }
3539 case 4: {
3540 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3541
3542 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3543 return emulate_gp(ctxt, 0);
3544
3545 break;
3546 }
3547 }
3548
3549 return X86EMUL_CONTINUE;
3550 }
3551
3552 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3553 {
3554 unsigned long dr7;
3555
3556 ctxt->ops->get_dr(ctxt, 7, &dr7);
3557
3558 /* Check if DR7.GD (general detect, bit 13) is set */
3559 return dr7 & (1 << 13);
3560 }
3561
3562 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3563 {
3564 int dr = ctxt->modrm_reg;
3565 u64 cr4;
3566
3567 if (dr > 7)
3568 return emulate_ud(ctxt);
3569
3570 cr4 = ctxt->ops->get_cr(ctxt, 4);
3571 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3572 return emulate_ud(ctxt);
3573
3574 if (check_dr7_gd(ctxt))
3575 return emulate_db(ctxt);
3576
3577 return X86EMUL_CONTINUE;
3578 }
3579
3580 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3581 {
3582 u64 new_val = ctxt->src.val64;
3583 int dr = ctxt->modrm_reg;
3584
3585 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3586 return emulate_gp(ctxt, 0);
3587
3588 return check_dr_read(ctxt);
3589 }
3590
3591 static int check_svme(struct x86_emulate_ctxt *ctxt)
3592 {
3593 u64 efer;
3594
3595 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3596
3597 if (!(efer & EFER_SVME))
3598 return emulate_ud(ctxt);
3599
3600 return X86EMUL_CONTINUE;
3601 }
3602
3603 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3604 {
3605 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3606
3607 /* Valid physical address? */
3608 if (rax & 0xffff000000000000ULL)
3609 return emulate_gp(ctxt, 0);
3610
3611 return check_svme(ctxt);
3612 }
3613
3614 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3615 {
3616 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3617
3618 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3619 return emulate_ud(ctxt);
3620
3621 return X86EMUL_CONTINUE;
3622 }
3623
3624 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3625 {
3626 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3627 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3628
3629 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3630 ctxt->ops->check_pmc(ctxt, rcx))
3631 return emulate_gp(ctxt, 0);
3632
3633 return X86EMUL_CONTINUE;
3634 }
3635
3636 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3637 {
3638 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3639 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3640 return emulate_gp(ctxt, 0);
3641
3642 return X86EMUL_CONTINUE;
3643 }
3644
3645 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3646 {
3647 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3648 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3649 return emulate_gp(ctxt, 0);
3650
3651 return X86EMUL_CONTINUE;
3652 }
3653
3654 #define D(_y) { .flags = (_y) }
3655 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3656 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3657 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3658 #define N D(NotImpl)
3659 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3660 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3661 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3662 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3663 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3664 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3665 #define II(_f, _e, _i) \
3666 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3667 #define IIP(_f, _e, _i, _p) \
3668 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3669 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3670 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3671
3672 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3673 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3674 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3675 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3676 #define I2bvIP(_f, _e, _i, _p) \
3677 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3678
3679 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3680 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3681 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3682
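/*
 * Expansion note: F6ALU(flags, op) emits the six classic ALU encodings
 * in opcode order.  F6ALU(Lock, em_add), for instance, covers
 *
 *	00 /r  add r/m8, r8	01 /r  add r/m, r
 *	02 /r  add r8, r/m8	03 /r  add r, r/m
 *	04 ib  add al, imm8	05 iz  add rAX, imm
 *
 * i.e. two DstMem|SrcReg forms, two DstReg|SrcMem forms (never locked),
 * and two DstAcc|SrcImm forms, via three F2bv() byte/word pairs.
 */
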
3683 static const struct opcode group7_rm0[] = {
3684 N,
3685 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3686 N, N, N, N, N, N,
3687 };
3688
3689 static const struct opcode group7_rm1[] = {
3690 DI(SrcNone | Priv, monitor),
3691 DI(SrcNone | Priv, mwait),
3692 N, N, N, N, N, N,
3693 };
3694
3695 static const struct opcode group7_rm3[] = {
3696 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3697 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3698 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3699 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3700 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3701 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3702 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3703 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3704 };
3705
3706 static const struct opcode group7_rm7[] = {
3707 N,
3708 DIP(SrcNone, rdtscp, check_rdtsc),
3709 N, N, N, N, N, N,
3710 };
3711
3712 static const struct opcode group1[] = {
3713 F(Lock, em_add),
3714 F(Lock | PageTable, em_or),
3715 F(Lock, em_adc),
3716 F(Lock, em_sbb),
3717 F(Lock | PageTable, em_and),
3718 F(Lock, em_sub),
3719 F(Lock, em_xor),
3720 F(NoWrite, em_cmp),
3721 };
3722
3723 static const struct opcode group1A[] = {
3724 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3725 };
3726
3727 static const struct opcode group2[] = {
3728 F(DstMem | ModRM, em_rol),
3729 F(DstMem | ModRM, em_ror),
3730 F(DstMem | ModRM, em_rcl),
3731 F(DstMem | ModRM, em_rcr),
3732 F(DstMem | ModRM, em_shl),
3733 F(DstMem | ModRM, em_shr),
3734 F(DstMem | ModRM, em_shl), /* /6: SAL, an alias of SHL */
3735 F(DstMem | ModRM, em_sar),
3736 };
3737
3738 static const struct opcode group3[] = {
3739 F(DstMem | SrcImm | NoWrite, em_test),
3740 F(DstMem | SrcImm | NoWrite, em_test), /* /1: undocumented TEST alias */
3741 F(DstMem | SrcNone | Lock, em_not),
3742 F(DstMem | SrcNone | Lock, em_neg),
3743 F(DstXacc | Src2Mem, em_mul_ex),
3744 F(DstXacc | Src2Mem, em_imul_ex),
3745 F(DstXacc | Src2Mem, em_div_ex),
3746 F(DstXacc | Src2Mem, em_idiv_ex),
3747 };
3748
3749 static const struct opcode group4[] = {
3750 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3751 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3752 N, N, N, N, N, N,
3753 };
3754
3755 static const struct opcode group5[] = {
3756 F(DstMem | SrcNone | Lock, em_inc),
3757 F(DstMem | SrcNone | Lock, em_dec),
3758 I(SrcMem | Stack, em_grp45),
3759 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3760 I(SrcMem | Stack, em_grp45),
3761 I(SrcMemFAddr | ImplicitOps, em_grp45),
3762 I(SrcMem | Stack, em_grp45), D(Undefined),
3763 };
3764
3765 static const struct opcode group6[] = {
3766 DI(Prot, sldt),
3767 DI(Prot, str),
3768 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3769 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3770 N, N, N, N,
3771 };
3772
3773 static const struct group_dual group7 = { {
3774 II(Mov | DstMem, em_sgdt, sgdt),
3775 II(Mov | DstMem, em_sidt, sidt),
3776 II(SrcMem | Priv, em_lgdt, lgdt),
3777 II(SrcMem | Priv, em_lidt, lidt),
3778 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3779 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3780 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3781 }, {
3782 EXT(0, group7_rm0),
3783 EXT(0, group7_rm1),
3784 N, EXT(0, group7_rm3),
3785 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3786 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3787 EXT(0, group7_rm7),
3788 } };
3789
3790 static const struct opcode group8[] = {
3791 N, N, N, N,
3792 F(DstMem | SrcImmByte | NoWrite, em_bt),
3793 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3794 F(DstMem | SrcImmByte | Lock, em_btr),
3795 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3796 };
3797
3798 static const struct group_dual group9 = { {
3799 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3800 }, {
3801 N, N, N, N, N, N, N, N,
3802 } };
3803
3804 static const struct opcode group11[] = {
3805 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3806 X7(D(Undefined)),
3807 };
3808
3809 static const struct gprefix pfx_0f_ae_7 = {
3810 I(SrcMem | ByteOp, em_clflush), N, N, N,
3811 };
3812
3813 static const struct group_dual group15 = { {
3814 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3815 }, {
3816 N, N, N, N, N, N, N, N,
3817 } };
3818
3819 static const struct gprefix pfx_0f_6f_0f_7f = {
3820 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3821 };
3822
3823 static const struct gprefix pfx_0f_2b = {
3824 I(0, em_mov), I(0, em_mov), N, N,
3825 };
3826
3827 static const struct gprefix pfx_0f_28_0f_29 = {
3828 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3829 };
3830
3831 static const struct gprefix pfx_0f_e7 = {
3832 N, I(Sse, em_mov), N, N,
3833 };
3834
3835 static const struct escape escape_d9 = { {
3836 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3837 }, {
3838 /* 0xC0 - 0xC7 */
3839 N, N, N, N, N, N, N, N,
3840 /* 0xC8 - 0xCF */
3841 N, N, N, N, N, N, N, N,
3842 /* 0xD0 - 0xD7 */
3843 N, N, N, N, N, N, N, N,
3844 /* 0xD8 - 0xDF */
3845 N, N, N, N, N, N, N, N,
3846 /* 0xE0 - 0xE7 */
3847 N, N, N, N, N, N, N, N,
3848 /* 0xE8 - 0xEF */
3849 N, N, N, N, N, N, N, N,
3850 /* 0xF0 - 0xF7 */
3851 N, N, N, N, N, N, N, N,
3852 /* 0xF8 - 0xFF */
3853 N, N, N, N, N, N, N, N,
3854 } };
3855
3856 static const struct escape escape_db = { {
3857 N, N, N, N, N, N, N, N,
3858 }, {
3859 /* 0xC0 - 0xC7 */
3860 N, N, N, N, N, N, N, N,
3861 /* 0xC8 - 0xCF */
3862 N, N, N, N, N, N, N, N,
3863 /* 0xD0 - 0xD7 */
3864 N, N, N, N, N, N, N, N,
3865 /* 0xD8 - 0xDF */
3866 N, N, N, N, N, N, N, N,
3867 /* 0xE0 - 0xE7 */
3868 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3869 /* 0xE8 - 0xEF */
3870 N, N, N, N, N, N, N, N,
3871 /* 0xF0 - 0xF7 */
3872 N, N, N, N, N, N, N, N,
3873 /* 0xF8 - 0xFF */
3874 N, N, N, N, N, N, N, N,
3875 } };
3876
3877 static const struct escape escape_dd = { {
3878 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3879 }, {
3880 /* 0xC0 - 0xC7 */
3881 N, N, N, N, N, N, N, N,
3882 /* 0xC8 - 0xCF */
3883 N, N, N, N, N, N, N, N,
3884 /* 0xD0 - 0xD7 */
3885 N, N, N, N, N, N, N, N,
3886 /* 0xD8 - 0xDF */
3887 N, N, N, N, N, N, N, N,
3888 /* 0xE0 - 0xE7 */
3889 N, N, N, N, N, N, N, N,
3890 /* 0xE8 - 0xEF */
3891 N, N, N, N, N, N, N, N,
3892 /* 0xF0 - 0xF7 */
3893 N, N, N, N, N, N, N, N,
3894 /* 0xF8 - 0xFF */
3895 N, N, N, N, N, N, N, N,
3896 } };
3897
3898 static const struct opcode opcode_table[256] = {
3899 /* 0x00 - 0x07 */
3900 F6ALU(Lock, em_add),
3901 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3902 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3903 /* 0x08 - 0x0F */
3904 F6ALU(Lock | PageTable, em_or),
3905 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3906 N,
3907 /* 0x10 - 0x17 */
3908 F6ALU(Lock, em_adc),
3909 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3910 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3911 /* 0x18 - 0x1F */
3912 F6ALU(Lock, em_sbb),
3913 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3914 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3915 /* 0x20 - 0x27 */
3916 F6ALU(Lock | PageTable, em_and), N, N,
3917 /* 0x28 - 0x2F */
3918 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3919 /* 0x30 - 0x37 */
3920 F6ALU(Lock, em_xor), N, N,
3921 /* 0x38 - 0x3F */
3922 F6ALU(NoWrite, em_cmp), N, N,
3923 /* 0x40 - 0x4F */
3924 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3925 /* 0x50 - 0x57 */
3926 X8(I(SrcReg | Stack, em_push)),
3927 /* 0x58 - 0x5F */
3928 X8(I(DstReg | Stack, em_pop)),
3929 /* 0x60 - 0x67 */
3930 I(ImplicitOps | Stack | No64, em_pusha),
3931 I(ImplicitOps | Stack | No64, em_popa),
3932 N, D(DstReg | SrcMem32 | ModRM | Mov), /* movsxd (x86-64) */
3933 N, N, N, N,
3934 /* 0x68 - 0x6F */
3935 I(SrcImm | Mov | Stack, em_push),
3936 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3937 I(SrcImmByte | Mov | Stack, em_push),
3938 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3939 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3940 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3941 /* 0x70 - 0x7F */
3942 X16(D(SrcImmByte)),
3943 /* 0x80 - 0x87 */
3944 G(ByteOp | DstMem | SrcImm, group1),
3945 G(DstMem | SrcImm, group1),
3946 G(ByteOp | DstMem | SrcImm | No64, group1),
3947 G(DstMem | SrcImmByte, group1),
3948 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3949 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3950 /* 0x88 - 0x8F */
3951 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3952 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3953 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3954 D(ModRM | SrcMem | NoAccess | DstReg),
3955 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3956 G(0, group1A),
3957 /* 0x90 - 0x97 */
3958 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3959 /* 0x98 - 0x9F */
3960 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3961 I(SrcImmFAddr | No64, em_call_far), N,
3962 II(ImplicitOps | Stack, em_pushf, pushf),
3963 II(ImplicitOps | Stack, em_popf, popf),
3964 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3965 /* 0xA0 - 0xA7 */
3966 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3967 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3968 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3969 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3970 /* 0xA8 - 0xAF */
3971 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3972 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3973 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3974 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
3975 /* 0xB0 - 0xB7 */
3976 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3977 /* 0xB8 - 0xBF */
3978 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
3979 /* 0xC0 - 0xC7 */
3980 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
3981 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3982 I(ImplicitOps | Stack, em_ret),
3983 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3984 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3985 G(ByteOp, group11), G(0, group11),
3986 /* 0xC8 - 0xCF */
3987 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
3988 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
3989 I(ImplicitOps | Stack, em_ret_far),
3990 D(ImplicitOps), DI(SrcImmByte, intn),
3991 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3992 /* 0xD0 - 0xD7 */
3993 G(Src2One | ByteOp, group2), G(Src2One, group2),
3994 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
3995 I(DstAcc | SrcImmUByte | No64, em_aam),
3996 I(DstAcc | SrcImmUByte | No64, em_aad),
3997 F(DstAcc | ByteOp | No64, em_salc),
3998 I(DstAcc | SrcXLat | ByteOp, em_mov),
3999 /* 0xD8 - 0xDF */
4000 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4001 /* 0xE0 - 0xE7 */
4002 X3(I(SrcImmByte, em_loop)),
4003 I(SrcImmByte, em_jcxz),
4004 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4005 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4006 /* 0xE8 - 0xEF */
4007 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
4008 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
4009 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4010 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4011 /* 0xF0 - 0xF7 */
4012 N, DI(ImplicitOps, icebp), N, N,
4013 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4014 G(ByteOp, group3), G(0, group3),
4015 /* 0xF8 - 0xFF */
4016 D(ImplicitOps), D(ImplicitOps),
4017 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4018 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4019 };
4020
4021 static const struct opcode twobyte_table[256] = {
4022 /* 0x00 - 0x0F */
4023 G(0, group6), GD(0, &group7), N, N,
4024 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4025 II(ImplicitOps | Priv, em_clts, clts), N,
4026 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4027 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4028 /* 0x10 - 0x1F */
4029 N, N, N, N, N, N, N, N,
4030 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4031 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4032 /* 0x20 - 0x2F */
4033 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4034 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4035 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4036 check_cr_write),
4037 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4038 check_dr_write),
4039 N, N, N, N,
4040 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4041 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4042 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4043 N, N, N, N,
4044 /* 0x30 - 0x3F */
4045 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4046 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4047 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4048 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4049 I(ImplicitOps | EmulateOnUD, em_sysenter),
4050 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4051 N, N,
4052 N, N, N, N, N, N, N, N,
4053 /* 0x40 - 0x4F */
4054 X16(D(DstReg | SrcMem | ModRM)),
4055 /* 0x50 - 0x5F */
4056 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4057 /* 0x60 - 0x6F */
4058 N, N, N, N,
4059 N, N, N, N,
4060 N, N, N, N,
4061 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4062 /* 0x70 - 0x7F */
4063 N, N, N, N,
4064 N, N, N, N,
4065 N, N, N, N,
4066 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4067 /* 0x80 - 0x8F */
4068 X16(D(SrcImm)),
4069 /* 0x90 - 0x9F */
4070 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4071 /* 0xA0 - 0xA7 */
4072 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4073 II(ImplicitOps, em_cpuid, cpuid),
4074 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4075 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4076 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4077 /* 0xA8 - 0xAF */
4078 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4079 DI(ImplicitOps, rsm),
4080 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4081 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4082 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4083 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4084 /* 0xB0 - 0xB7 */
4085 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4086 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4087 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4088 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4089 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4090 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4091 /* 0xB8 - 0xBF */
4092 N, N,
4093 G(BitOp, group8),
4094 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4095 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4096 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4097 /* 0xC0 - 0xC7 */
4098 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4099 N, D(DstMem | SrcReg | ModRM | Mov),
4100 N, N, N, GD(0, &group9),
4101 /* 0xC8 - 0xCF */
4102 X8(I(DstReg, em_bswap)),
4103 /* 0xD0 - 0xDF */
4104 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4105 /* 0xE0 - 0xEF */
4106 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4107 N, N, N, N, N, N, N, N,
4108 /* 0xF0 - 0xFF */
4109 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4110 };
4111
4112 static const struct gprefix three_byte_0f_38_f0 = {
4113 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4114 };
4115
4116 static const struct gprefix three_byte_0f_38_f1 = {
4117 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4118 };
4119
4120 /*
4121 * Insns below are selected by the prefix, and the table is indexed by
4122 * the third opcode byte.
4123 */
4124 static const struct opcode opcode_map_0f_38[256] = {
4125 /* 0x00 - 0x7f */
4126 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4127 /* 0x80 - 0xef */
4128 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4129 /* 0xf0 - 0xf1 */
4130 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4131 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4132 /* 0xf2 - 0xff */
4133 N, N, X4(N), X8(N)
4134 };
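
/*
 * Illustrative sketch only (kept under #if 0, not built): how a
 * 0F 38 F0/F1 MOVBE encoding would resolve through the tables above.
 * The helper name is hypothetical; the real lookup is the Prefix case
 * in x86_decode_insn() below.
 */
#if 0
static struct opcode lookup_0f_38(u8 third_byte, bool op_prefix, u8 rep_prefix)
{
	struct opcode opcode = opcode_map_0f_38[third_byte];

	/* A mandatory SIMD prefix selects the entry within the group. */
	switch (op_prefix ? 0x66 : rep_prefix) {
	case 0x00: return opcode.u.gprefix->pfx_no;
	case 0x66: return opcode.u.gprefix->pfx_66;
	case 0xf2: return opcode.u.gprefix->pfx_f2;
	case 0xf3: return opcode.u.gprefix->pfx_f3;
	}
	return opcode;
}
#endif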
4135
4136 #undef D
4137 #undef N
4138 #undef G
4139 #undef GD
4140 #undef I
4141 #undef GP
4142 #undef EXT
4143
4144 #undef D2bv
4145 #undef D2bvIP
4146 #undef I2bv
4147 #undef I2bvIP
4148 #undef I6ALU
4149
4150 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4151 {
4152 unsigned size;
4153
4154 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
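/*
 * With a 64-bit operand size the immediate is still at most 4 bytes
 * (sign-extended at execution); only OpImm64 fetches a full 8 bytes,
 * and it passes ctxt->op_bytes to decode_imm() directly.
 */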
4155 if (size == 8)
4156 size = 4;
4157 return size;
4158 }
4159
4160 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4161 unsigned size, bool sign_extension)
4162 {
4163 int rc = X86EMUL_CONTINUE;
4164
4165 op->type = OP_IMM;
4166 op->bytes = size;
4167 op->addr.mem.ea = ctxt->_eip;
4168 /* NB. Immediates are sign-extended as necessary. */
4169 switch (op->bytes) {
4170 case 1:
4171 op->val = insn_fetch(s8, ctxt);
4172 break;
4173 case 2:
4174 op->val = insn_fetch(s16, ctxt);
4175 break;
4176 case 4:
4177 op->val = insn_fetch(s32, ctxt);
4178 break;
4179 case 8:
4180 op->val = insn_fetch(s64, ctxt);
4181 break;
4182 }
4183 if (!sign_extension) {
4184 switch (op->bytes) {
4185 case 1:
4186 op->val &= 0xff;
4187 break;
4188 case 2:
4189 op->val &= 0xffff;
4190 break;
4191 case 4:
4192 op->val &= 0xffffffff;
4193 break;
4194 }
4195 }
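/* The insn_fetch() macro jumps here when an immediate byte fetch fails. */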
4196 done:
4197 return rc;
4198 }
4199
4200 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4201 unsigned d)
4202 {
4203 int rc = X86EMUL_CONTINUE;
4204
4205 switch (d) {
4206 case OpReg:
4207 decode_register_operand(ctxt, op);
4208 break;
4209 case OpImmUByte:
4210 rc = decode_imm(ctxt, op, 1, false);
4211 break;
4212 case OpMem:
4213 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4214 mem_common:
4215 *op = ctxt->memop;
4216 ctxt->memopp = op;
4217 if (ctxt->d & BitOp)
4218 fetch_bit_operand(ctxt);
4219 op->orig_val = op->val;
4220 break;
4221 case OpMem64:
4222 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4223 goto mem_common;
4224 case OpAcc:
4225 op->type = OP_REG;
4226 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4227 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4228 fetch_register_operand(op);
4229 op->orig_val = op->val;
4230 break;
4231 case OpAccLo:
4232 op->type = OP_REG;
4233 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4234 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4235 fetch_register_operand(op);
4236 op->orig_val = op->val;
4237 break;
4238 case OpAccHi:
4239 if (ctxt->d & ByteOp) {
4240 op->type = OP_NONE;
4241 break;
4242 }
4243 op->type = OP_REG;
4244 op->bytes = ctxt->op_bytes;
4245 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4246 fetch_register_operand(op);
4247 op->orig_val = op->val;
4248 break;
4249 case OpDI:
4250 op->type = OP_MEM;
4251 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4252 op->addr.mem.ea =
4253 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4254 op->addr.mem.seg = VCPU_SREG_ES;
4255 op->val = 0;
4256 op->count = 1;
4257 break;
4258 case OpDX:
4259 op->type = OP_REG;
4260 op->bytes = 2;
4261 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4262 fetch_register_operand(op);
4263 break;
4264 case OpCL:
4265 op->bytes = 1;
4266 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4267 break;
4268 case OpImmByte:
4269 rc = decode_imm(ctxt, op, 1, true);
4270 break;
4271 case OpOne:
4272 op->bytes = 1;
4273 op->val = 1;
4274 break;
4275 case OpImm:
4276 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4277 break;
4278 case OpImm64:
4279 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4280 break;
4281 case OpMem8:
4282 ctxt->memop.bytes = 1;
4283 if (ctxt->memop.type == OP_REG) {
4284 ctxt->memop.addr.reg = decode_register(ctxt,
4285 ctxt->modrm_rm, true);
4286 fetch_register_operand(&ctxt->memop);
4287 }
4288 goto mem_common;
4289 case OpMem16:
4290 ctxt->memop.bytes = 2;
4291 goto mem_common;
4292 case OpMem32:
4293 ctxt->memop.bytes = 4;
4294 goto mem_common;
4295 case OpImmU16:
4296 rc = decode_imm(ctxt, op, 2, false);
4297 break;
4298 case OpImmU:
4299 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4300 break;
4301 case OpSI:
4302 op->type = OP_MEM;
4303 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4304 op->addr.mem.ea =
4305 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4306 op->addr.mem.seg = ctxt->seg_override;
4307 op->val = 0;
4308 op->count = 1;
4309 break;
4310 case OpXLat:
4311 op->type = OP_MEM;
4312 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4313 op->addr.mem.ea =
4314 register_address(ctxt,
4315 reg_read(ctxt, VCPU_REGS_RBX) +
4316 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4317 op->addr.mem.seg = ctxt->seg_override;
4318 op->val = 0;
4319 break;
4320 case OpImmFAddr:
4321 op->type = OP_IMM;
4322 op->addr.mem.ea = ctxt->_eip;
4323 op->bytes = ctxt->op_bytes + 2;
4324 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4325 break;
4326 case OpMemFAddr:
4327 ctxt->memop.bytes = ctxt->op_bytes + 2;
4328 goto mem_common;
4329 case OpES:
4330 op->val = VCPU_SREG_ES;
4331 break;
4332 case OpCS:
4333 op->val = VCPU_SREG_CS;
4334 break;
4335 case OpSS:
4336 op->val = VCPU_SREG_SS;
4337 break;
4338 case OpDS:
4339 op->val = VCPU_SREG_DS;
4340 break;
4341 case OpFS:
4342 op->val = VCPU_SREG_FS;
4343 break;
4344 case OpGS:
4345 op->val = VCPU_SREG_GS;
4346 break;
4347 case OpImplicit:
4348 /* Special instructions do their own operand decoding. */
4349 default:
4350 op->type = OP_NONE; /* Disable writeback. */
4351 break;
4352 }
4353
4354 done:
4355 return rc;
4356 }
4357
4358 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4359 {
4360 int rc = X86EMUL_CONTINUE;
4361 int mode = ctxt->mode;
4362 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4363 bool op_prefix = false;
4364 bool has_seg_override = false;
4365 struct opcode opcode;
4366
4367 ctxt->memop.type = OP_NONE;
4368 ctxt->memopp = NULL;
4369 ctxt->_eip = ctxt->eip;
4370 ctxt->fetch.ptr = ctxt->fetch.data;
4371 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4372 ctxt->opcode_len = 1;
4373 if (insn_len > 0)
4374 memcpy(ctxt->fetch.data, insn, insn_len);
4375 else {
4376 rc = __do_insn_fetch_bytes(ctxt, 1);
4377 if (rc != X86EMUL_CONTINUE)
4378 return rc;
4379 }
4380
4381 switch (mode) {
4382 case X86EMUL_MODE_REAL:
4383 case X86EMUL_MODE_VM86:
4384 case X86EMUL_MODE_PROT16:
4385 def_op_bytes = def_ad_bytes = 2;
4386 break;
4387 case X86EMUL_MODE_PROT32:
4388 def_op_bytes = def_ad_bytes = 4;
4389 break;
4390 #ifdef CONFIG_X86_64
4391 case X86EMUL_MODE_PROT64:
4392 def_op_bytes = 4;
4393 def_ad_bytes = 8;
4394 break;
4395 #endif
4396 default:
4397 return EMULATION_FAILED;
4398 }
4399
4400 ctxt->op_bytes = def_op_bytes;
4401 ctxt->ad_bytes = def_ad_bytes;
4402
4403 /* Legacy prefixes. */
4404 for (;;) {
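/*
 * The XOR trick below toggles between the two legal sizes:
 * 2 ^ 6 = 4, 4 ^ 6 = 2, and 4 ^ 12 = 8, 8 ^ 12 = 4.
 */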
4405 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4406 case 0x66: /* operand-size override */
4407 op_prefix = true;
4408 /* switch between 2/4 bytes */
4409 ctxt->op_bytes = def_op_bytes ^ 6;
4410 break;
4411 case 0x67: /* address-size override */
4412 if (mode == X86EMUL_MODE_PROT64)
4413 /* switch between 4/8 bytes */
4414 ctxt->ad_bytes = def_ad_bytes ^ 12;
4415 else
4416 /* switch between 2/4 bytes */
4417 ctxt->ad_bytes = def_ad_bytes ^ 6;
4418 break;
4419 case 0x26: /* ES override */
4420 case 0x2e: /* CS override */
4421 case 0x36: /* SS override */
4422 case 0x3e: /* DS override */
4423 has_seg_override = true;
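/*
 * Bits 4:3 of these prefix bytes match VCPU_SREG_ES..VCPU_SREG_DS
 * (0..3); 0x64/0x65 below map to VCPU_SREG_FS/GS via the low bits.
 */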
4424 ctxt->seg_override = (ctxt->b >> 3) & 3;
4425 break;
4426 case 0x64: /* FS override */
4427 case 0x65: /* GS override */
4428 has_seg_override = true;
4429 ctxt->seg_override = ctxt->b & 7;
4430 break;
4431 case 0x40 ... 0x4f: /* REX */
4432 if (mode != X86EMUL_MODE_PROT64)
4433 goto done_prefixes;
4434 ctxt->rex_prefix = ctxt->b;
4435 continue;
4436 case 0xf0: /* LOCK */
4437 ctxt->lock_prefix = 1;
4438 break;
4439 case 0xf2: /* REPNE/REPNZ */
4440 case 0xf3: /* REP/REPE/REPZ */
4441 ctxt->rep_prefix = ctxt->b;
4442 break;
4443 default:
4444 goto done_prefixes;
4445 }
4446
4447 /* Any legacy prefix after a REX prefix nullifies its effect. */
4448
4449 ctxt->rex_prefix = 0;
4450 }
4451
4452 done_prefixes:
4453
4454 /* REX prefix. */
4455 if (ctxt->rex_prefix & 8)
4456 ctxt->op_bytes = 8; /* REX.W */
4457
4458 /* Opcode byte(s). */
4459 opcode = opcode_table[ctxt->b];
4460 /* Two-byte opcode? */
4461 if (ctxt->b == 0x0f) {
4462 ctxt->opcode_len = 2;
4463 ctxt->b = insn_fetch(u8, ctxt);
4464 opcode = twobyte_table[ctxt->b];
4465
4466 /* 0F_38 opcode map */
4467 if (ctxt->b == 0x38) {
4468 ctxt->opcode_len = 3;
4469 ctxt->b = insn_fetch(u8, ctxt);
4470 opcode = opcode_map_0f_38[ctxt->b];
4471 }
4472 }
4473 ctxt->d = opcode.flags;
4474
4475 if (ctxt->d & ModRM)
4476 ctxt->modrm = insn_fetch(u8, ctxt);
4477
4478 /* vex-prefix instructions are not implemented */
4479 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4480 (mode == X86EMUL_MODE_PROT64 ||
4481 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4482 ctxt->d = NotImpl;
4483 }
4484
4485 while (ctxt->d & GroupMask) {
4486 switch (ctxt->d & GroupMask) {
4487 case Group:
4488 goffset = (ctxt->modrm >> 3) & 7;
4489 opcode = opcode.u.group[goffset];
4490 break;
4491 case GroupDual:
4492 goffset = (ctxt->modrm >> 3) & 7;
4493 if ((ctxt->modrm >> 6) == 3)
4494 opcode = opcode.u.gdual->mod3[goffset];
4495 else
4496 opcode = opcode.u.gdual->mod012[goffset];
4497 break;
4498 case RMExt:
4499 goffset = ctxt->modrm & 7;
4500 opcode = opcode.u.group[goffset];
4501 break;
4502 case Prefix:
4503 if (ctxt->rep_prefix && op_prefix)
4504 return EMULATION_FAILED;
4505 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4506 switch (simd_prefix) {
4507 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4508 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4509 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4510 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4511 }
4512 break;
4513 case Escape:
4514 if (ctxt->modrm > 0xbf)
4515 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4516 else
4517 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4518 break;
4519 default:
4520 return EMULATION_FAILED;
4521 }
4522
4523 ctxt->d &= ~(u64)GroupMask;
4524 ctxt->d |= opcode.flags;
4525 }
4526
4527 /* Unrecognised? */
4528 if (ctxt->d == 0)
4529 return EMULATION_FAILED;
4530
4531 ctxt->execute = opcode.u.execute;
4532
4533 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4534 return EMULATION_FAILED;
4535
4536 if (unlikely(ctxt->d &
4537 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
4538 /*
4539 * These are copied unconditionally here, and checked unconditionally
4540 * in x86_emulate_insn.
4541 */
4542 ctxt->check_perm = opcode.check_perm;
4543 ctxt->intercept = opcode.intercept;
4544
4545 if (ctxt->d & NotImpl)
4546 return EMULATION_FAILED;
4547
4548 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
4549 ctxt->op_bytes = 8;
4550
4551 if (ctxt->d & Op3264) {
4552 if (mode == X86EMUL_MODE_PROT64)
4553 ctxt->op_bytes = 8;
4554 else
4555 ctxt->op_bytes = 4;
4556 }
4557
4558 if (ctxt->d & Sse)
4559 ctxt->op_bytes = 16;
4560 else if (ctxt->d & Mmx)
4561 ctxt->op_bytes = 8;
4562 }
4563
4564 /* ModRM and SIB bytes. */
4565 if (ctxt->d & ModRM) {
4566 rc = decode_modrm(ctxt, &ctxt->memop);
4567 if (!has_seg_override) {
4568 has_seg_override = true;
4569 ctxt->seg_override = ctxt->modrm_seg;
4570 }
4571 } else if (ctxt->d & MemAbs)
4572 rc = decode_abs(ctxt, &ctxt->memop);
4573 if (rc != X86EMUL_CONTINUE)
4574 goto done;
4575
4576 if (!has_seg_override)
4577 ctxt->seg_override = VCPU_SREG_DS;
4578
4579 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4580
4581 /*
4582 * Decode and fetch the source operand: register, memory
4583 * or immediate.
4584 */
4585 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4586 if (rc != X86EMUL_CONTINUE)
4587 goto done;
4588
4589 /*
4590 * Decode and fetch the second source operand: register, memory
4591 * or immediate.
4592 */
4593 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4594 if (rc != X86EMUL_CONTINUE)
4595 goto done;
4596
4597 /* Decode and fetch the destination operand: register or memory. */
4598 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4599
4600 if (ctxt->rip_relative)
4601 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4602
4603 done:
4604 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4605 }
4606
4607 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4608 {
4609 return ctxt->d & PageTable;
4610 }
4611
4612 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4613 {
4614 /* The second termination condition applies only to REPE
4615 * and REPNE. If the repeat string operation prefix is
4616 * REPE/REPZ or REPNE/REPNZ, test the corresponding
4617 * termination condition:
4618 * - if REPE/REPZ and ZF = 0 then done
4619 * - if REPNE/REPNZ and ZF = 1 then done
4620 */
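/* 0xa6/0xa7 are CMPS and 0xae/0xaf are SCAS. */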
4621 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4622 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4623 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4624 ((ctxt->eflags & EFLG_ZF) == 0))
4625 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4626 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4627 return true;
4628
4629 return false;
4630 }
4631
4632 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4633 {
4634 bool fault = false;
4635
4636 ctxt->ops->get_fpu(ctxt);
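/*
 * fwait forces delivery of any pending x87 exception on the host; the
 * fixup entry turns it into fault = true so it can be reflected to the
 * guest as #MF below.
 */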
4637 asm volatile("1: fwait \n\t"
4638 "2: \n\t"
4639 ".pushsection .fixup,\"ax\" \n\t"
4640 "3: \n\t"
4641 "movb $1, %[fault] \n\t"
4642 "jmp 2b \n\t"
4643 ".popsection \n\t"
4644 _ASM_EXTABLE(1b, 3b)
4645 : [fault]"+qm"(fault));
4646 ctxt->ops->put_fpu(ctxt);
4647
4648 if (unlikely(fault))
4649 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4650
4651 return X86EMUL_CONTINUE;
4652 }
4653
4654 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4655 struct operand *op)
4656 {
4657 if (op->type == OP_MM)
4658 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4659 }
4660
4661 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4662 {
4663 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
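/*
 * fastop tables pack one stub per operand size, FASTOP_SIZE bytes
 * apart: byte at +0, word at +1 (__ffs(2) == 1), dword at +2 and
 * qword at +3 entries.
 */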
4664 if (!(ctxt->d & ByteOp))
4665 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4666 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4667 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4668 [fastop]"+S"(fop)
4669 : "c"(ctxt->src2.val));
4670 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4671 if (!fop) /* exception is returned in fop variable */
4672 return emulate_de(ctxt);
4673 return X86EMUL_CONTINUE;
4674 }
4675
4676 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4677 {
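/*
 * Zero every decode field laid out between rip_relative and modrm in
 * struct x86_emulate_ctxt with a single memset.
 */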
4678 memset(&ctxt->rip_relative, 0,
4679 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4680
4681 ctxt->io_read.pos = 0;
4682 ctxt->io_read.end = 0;
4683 ctxt->mem_read.end = 0;
4684 }
4685
4686 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4687 {
4688 const struct x86_emulate_ops *ops = ctxt->ops;
4689 int rc = X86EMUL_CONTINUE;
4690 int saved_dst_type = ctxt->dst.type;
4691
4692 ctxt->mem_read.pos = 0;
4693
4694 /* LOCK prefix is allowed only with some instructions */
4695 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4696 rc = emulate_ud(ctxt);
4697 goto done;
4698 }
4699
4700 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4701 rc = emulate_ud(ctxt);
4702 goto done;
4703 }
4704
4705 if (unlikely(ctxt->d &
4706 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4707 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4708 (ctxt->d & Undefined)) {
4709 rc = emulate_ud(ctxt);
4710 goto done;
4711 }
4712
4713 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4714 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4715 rc = emulate_ud(ctxt);
4716 goto done;
4717 }
4718
4719 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4720 rc = emulate_nm(ctxt);
4721 goto done;
4722 }
4723
4724 if (ctxt->d & Mmx) {
4725 rc = flush_pending_x87_faults(ctxt);
4726 if (rc != X86EMUL_CONTINUE)
4727 goto done;
4728 /*
4729 * Now that we know the fpu is exception safe, we can fetch
4730 * operands from it.
4731 */
4732 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4733 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4734 if (!(ctxt->d & Mov))
4735 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4736 }
4737
4738 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4739 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4740 X86_ICPT_PRE_EXCEPT);
4741 if (rc != X86EMUL_CONTINUE)
4742 goto done;
4743 }
4744
4745 /* Privileged instruction can be executed only in CPL=0 */
4746 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4747 if (ctxt->d & PrivUD)
4748 rc = emulate_ud(ctxt);
4749 else
4750 rc = emulate_gp(ctxt, 0);
4751 goto done;
4752 }
4753
4754 /* Instruction can only be executed in protected mode */
4755 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4756 rc = emulate_ud(ctxt);
4757 goto done;
4758 }
4759
4760 /* Do instruction specific permission checks */
4761 if (ctxt->d & CheckPerm) {
4762 rc = ctxt->check_perm(ctxt);
4763 if (rc != X86EMUL_CONTINUE)
4764 goto done;
4765 }
4766
4767 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4768 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4769 X86_ICPT_POST_EXCEPT);
4770 if (rc != X86EMUL_CONTINUE)
4771 goto done;
4772 }
4773
4774 if (ctxt->rep_prefix && (ctxt->d & String)) {
4775 /* All REP prefixes have the same first termination condition */
4776 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4777 ctxt->eip = ctxt->_eip;
4778 ctxt->eflags &= ~EFLG_RF;
4779 goto done;
4780 }
4781 }
4782 }
4783
4784 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4785 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4786 ctxt->src.valptr, ctxt->src.bytes);
4787 if (rc != X86EMUL_CONTINUE)
4788 goto done;
4789 ctxt->src.orig_val64 = ctxt->src.val64;
4790 }
4791
4792 if (ctxt->src2.type == OP_MEM) {
4793 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4794 &ctxt->src2.val, ctxt->src2.bytes);
4795 if (rc != X86EMUL_CONTINUE)
4796 goto done;
4797 }
4798
4799 if ((ctxt->d & DstMask) == ImplicitOps)
4800 goto special_insn;
4801
4802
4803 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4804 /* optimisation - avoid slow emulated read if Mov */
4805 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4806 &ctxt->dst.val, ctxt->dst.bytes);
4807 if (rc != X86EMUL_CONTINUE)
4808 goto done;
4809 }
4810 ctxt->dst.orig_val = ctxt->dst.val;
4811
4812 special_insn:
4813
4814 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4815 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4816 X86_ICPT_POST_MEMACCESS);
4817 if (rc != X86EMUL_CONTINUE)
4818 goto done;
4819 }
4820
4821 if (ctxt->rep_prefix && (ctxt->d & String))
4822 ctxt->eflags |= EFLG_RF;
4823 else
4824 ctxt->eflags &= ~EFLG_RF;
4825
4826 if (ctxt->execute) {
4827 if (ctxt->d & Fastop) {
4828 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4829 rc = fastop(ctxt, fop);
4830 if (rc != X86EMUL_CONTINUE)
4831 goto done;
4832 goto writeback;
4833 }
4834 rc = ctxt->execute(ctxt);
4835 if (rc != X86EMUL_CONTINUE)
4836 goto done;
4837 goto writeback;
4838 }
4839
4840 if (ctxt->opcode_len == 2)
4841 goto twobyte_insn;
4842 else if (ctxt->opcode_len == 3)
4843 goto threebyte_insn;
4844
4845 switch (ctxt->b) {
4846 case 0x63: /* movsxd */
4847 if (ctxt->mode != X86EMUL_MODE_PROT64)
4848 goto cannot_emulate;
4849 ctxt->dst.val = (s32) ctxt->src.val;
4850 break;
4851 case 0x70 ... 0x7f: /* jcc (short) */
4852 if (test_cc(ctxt->b, ctxt->eflags))
4853 rc = jmp_rel(ctxt, ctxt->src.val);
4854 break;
4855 case 0x8d: /* lea r16/r32, m */
4856 ctxt->dst.val = ctxt->src.addr.mem.ea;
4857 break;
4858 case 0x90 ... 0x97: /* nop / xchg reg, rax */
4859 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
4860 ctxt->dst.type = OP_NONE;
4861 else
4862 rc = em_xchg(ctxt);
4863 break;
4864 case 0x98: /* cbw/cwde/cdqe */
4865 switch (ctxt->op_bytes) {
4866 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4867 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4868 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4869 }
4870 break;
4871 case 0xcc: /* int3 */
4872 rc = emulate_int(ctxt, 3);
4873 break;
4874 case 0xcd: /* int n */
4875 rc = emulate_int(ctxt, ctxt->src.val);
4876 break;
4877 case 0xce: /* into */
4878 if (ctxt->eflags & EFLG_OF)
4879 rc = emulate_int(ctxt, 4);
4880 break;
4881 case 0xe9: /* jmp rel */
4882 case 0xeb: /* jmp rel short */
4883 rc = jmp_rel(ctxt, ctxt->src.val);
4884 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4885 break;
4886 case 0xf4: /* hlt */
4887 ctxt->ops->halt(ctxt);
4888 break;
4889 case 0xf5: /* cmc */
4890 /* complement carry flag from eflags reg */
4891 ctxt->eflags ^= EFLG_CF;
4892 break;
4893 case 0xf8: /* clc */
4894 ctxt->eflags &= ~EFLG_CF;
4895 break;
4896 case 0xf9: /* stc */
4897 ctxt->eflags |= EFLG_CF;
4898 break;
4899 case 0xfc: /* cld */
4900 ctxt->eflags &= ~EFLG_DF;
4901 break;
4902 case 0xfd: /* std */
4903 ctxt->eflags |= EFLG_DF;
4904 break;
4905 default:
4906 goto cannot_emulate;
4907 }
4908
4909 if (rc != X86EMUL_CONTINUE)
4910 goto done;
4911
4912 writeback:
4913 if (ctxt->d & SrcWrite) {
4914 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4915 rc = writeback(ctxt, &ctxt->src);
4916 if (rc != X86EMUL_CONTINUE)
4917 goto done;
4918 }
4919 if (!(ctxt->d & NoWrite)) {
4920 rc = writeback(ctxt, &ctxt->dst);
4921 if (rc != X86EMUL_CONTINUE)
4922 goto done;
4923 }
4924
4925 /*
4926 * restore dst type in case the decoding will be reused
4927 * (happens for string instructions)
4928 */
4929 ctxt->dst.type = saved_dst_type;
4930
4931 if ((ctxt->d & SrcMask) == SrcSI)
4932 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
4933
4934 if ((ctxt->d & DstMask) == DstDI)
4935 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
4936
4937 if (ctxt->rep_prefix && (ctxt->d & String)) {
4938 unsigned int count;
4939 struct read_cache *r = &ctxt->io_read;
4940 if ((ctxt->d & SrcMask) == SrcSI)
4941 count = ctxt->src.count;
4942 else
4943 count = ctxt->dst.count;
4944 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
4945 -count);
4946
4947 if (!string_insn_completed(ctxt)) {
4948 /*
4949 * Re-enter the guest when the pio read ahead buffer is empty
4950 * or, if it is not used, after every 1024 iterations.
4951 */
4952 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
4953 (r->end == 0 || r->end != r->pos)) {
4954 /*
4955 * Reset read cache. Usually happens before
4956 * decode, but since the instruction is restarted
4957 * we have to do it here.
4958 */
4959 ctxt->mem_read.end = 0;
4960 writeback_registers(ctxt);
4961 return EMULATION_RESTART;
4962 }
4963 goto done; /* skip rip writeback */
4964 }
4965 ctxt->eflags &= ~EFLG_RF;
4966 }
4967
4968 ctxt->eip = ctxt->_eip;
4969
4970 done:
4971 if (rc == X86EMUL_PROPAGATE_FAULT) {
4972 WARN_ON(ctxt->exception.vector > 0x1f);
4973 ctxt->have_exception = true;
4974 }
4975 if (rc == X86EMUL_INTERCEPTED)
4976 return EMULATION_INTERCEPTED;
4977
4978 if (rc == X86EMUL_CONTINUE)
4979 writeback_registers(ctxt);
4980
4981 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4982
4983 twobyte_insn:
4984 switch (ctxt->b) {
4985 case 0x09: /* wbinvd */
4986 (ctxt->ops->wbinvd)(ctxt);
4987 break;
4988 case 0x08: /* invd */
4989 case 0x0d: /* GrpP (prefetch) */
4990 case 0x18: /* Grp16 (prefetch/nop) */
4991 case 0x1f: /* nop */
4992 break;
4993 case 0x20: /* mov cr, reg */
4994 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4995 break;
4996 case 0x21: /* mov from dr to reg */
4997 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4998 break;
4999 case 0x40 ... 0x4f: /* cmov */
5000 if (test_cc(ctxt->b, ctxt->eflags))
5001 ctxt->dst.val = ctxt->src.val;
5002 else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
5003 ctxt->op_bytes != 4)
5004 ctxt->dst.type = OP_NONE; /* no writeback */
5005 break;
5006 case 0x80 ... 0x8f: /* jnz rel, etc. */
5007 if (test_cc(ctxt->b, ctxt->eflags))
5008 rc = jmp_rel(ctxt, ctxt->src.val);
5009 break;
5010 case 0x90 ... 0x9f: /* setcc r/m8 */
5011 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5012 break;
5013 case 0xb6 ... 0xb7: /* movzx */
5014 ctxt->dst.bytes = ctxt->op_bytes;
5015 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5016 : (u16) ctxt->src.val;
5017 break;
5018 case 0xbe ... 0xbf: /* movsx */
5019 ctxt->dst.bytes = ctxt->op_bytes;
5020 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5021 (s16) ctxt->src.val;
5022 break;
5023 case 0xc3: /* movnti */
5024 ctxt->dst.bytes = ctxt->op_bytes;
5025 ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
5026 (u32) ctxt->src.val;
5027 break;
5028 default:
5029 goto cannot_emulate;
5030 }
5031
5032 threebyte_insn:
5033
5034 if (rc != X86EMUL_CONTINUE)
5035 goto done;
5036
5037 goto writeback;
5038
5039 cannot_emulate:
5040 return EMULATION_FAILED;
5041 }
5042
5043 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5044 {
5045 invalidate_registers(ctxt);
5046 }
5047
5048 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5049 {
5050 writeback_registers(ctxt);
5051 }