/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001-2012  The Bochs Project
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////

#include "qemu/osdep.h"
#include "panic.h"
#include "qemu-common.h"
#include "x86_decode.h"
#include "x86.h"
#include "x86_emu.h"
#include "x86_mmu.h"
#include "x86_flags.h"
#include "vmcs.h"
#include "vmx.h"

void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,
                   int direction, int size, uint32_t count);

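/*
 * Fetch both operands, apply the binary operator 'cmd' at the decoded
 * operand size (1, 2 or 4 bytes), optionally write the result back to
 * the destination, and update the flags via FLAGS_FUNC##<size>.
 */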
#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res)  \
{                                                                   \
    fetch_operands(env, decode, 2, true, true, false);              \
    switch (decode->operand_size) {                                 \
    case 1:                                                         \
    {                                                               \
        uint8_t v1 = (uint8_t)decode->op[0].val;                    \
        uint8_t v2 = (uint8_t)decode->op[1].val;                    \
        uint8_t diff = v1 cmd v2;                                   \
        if (save_res) {                                             \
            write_val_ext(env, decode->op[0].ptr, diff, 1);         \
        }                                                           \
        FLAGS_FUNC##8(env, v1, v2, diff);                           \
        break;                                                      \
    }                                                               \
    case 2:                                                         \
    {                                                               \
        uint16_t v1 = (uint16_t)decode->op[0].val;                  \
        uint16_t v2 = (uint16_t)decode->op[1].val;                  \
        uint16_t diff = v1 cmd v2;                                  \
        if (save_res) {                                             \
            write_val_ext(env, decode->op[0].ptr, diff, 2);         \
        }                                                           \
        FLAGS_FUNC##16(env, v1, v2, diff);                          \
        break;                                                      \
    }                                                               \
    case 4:                                                         \
    {                                                               \
        uint32_t v1 = (uint32_t)decode->op[0].val;                  \
        uint32_t v2 = (uint32_t)decode->op[1].val;                  \
        uint32_t diff = v1 cmd v2;                                  \
        if (save_res) {                                             \
            write_val_ext(env, decode->op[0].ptr, diff, 4);         \
        }                                                           \
        FLAGS_FUNC##32(env, v1, v2, diff);                          \
        break;                                                      \
    }                                                               \
    default:                                                        \
        VM_PANIC("bad size\n");                                     \
    }                                                               \
}                                                                   \

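/*
 * Register accessors: read or write a guest register in the emulator
 * register file at the given operand size.  Byte/word/dword accesses use
 * the lx/rx/erx views; a dword write zero-extends into the full 64-bit
 * register, matching x86 semantics.
 */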
target_ulong read_reg(CPUX86State *env, int reg, int size)
{
    switch (size) {
    case 1:
        return env->hvf_emul->regs[reg].lx;
    case 2:
        return env->hvf_emul->regs[reg].rx;
    case 4:
        return env->hvf_emul->regs[reg].erx;
    case 8:
        return env->hvf_emul->regs[reg].rrx;
    default:
        abort();
    }
    return 0;
}

void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
{
    switch (size) {
    case 1:
        env->hvf_emul->regs[reg].lx = val;
        break;
    case 2:
        env->hvf_emul->regs[reg].rx = val;
        break;
    case 4:
        env->hvf_emul->regs[reg].rrx = (uint32_t)val;
        break;
    case 8:
        env->hvf_emul->regs[reg].rrx = val;
        break;
    default:
        abort();
    }
}

target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
{
    target_ulong val;

    switch (size) {
    case 1:
        val = *(uint8_t *)reg_ptr;
        break;
    case 2:
        val = *(uint16_t *)reg_ptr;
        break;
    case 4:
        val = *(uint32_t *)reg_ptr;
        break;
    case 8:
        val = *(uint64_t *)reg_ptr;
        break;
    default:
        abort();
    }
    return val;
}

void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
{
    switch (size) {
    case 1:
        *(uint8_t *)reg_ptr = val;
        break;
    case 2:
        *(uint16_t *)reg_ptr = val;
        break;
    case 4:
        *(uint64_t *)reg_ptr = (uint32_t)val;
        break;
    case 8:
        *(uint64_t *)reg_ptr = val;
        break;
    default:
        abort();
    }
}

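/*
 * Operand pointers are either host pointers into the emulator register
 * file or guest linear addresses; is_host_reg() tells the two apart so
 * that read_val_ext()/write_val_ext() can dispatch between a direct
 * register access and a guest memory access via vmx_read_mem()/
 * vmx_write_mem().
 */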
static bool is_host_reg(struct CPUX86State *env, target_ulong ptr)
{
    return (ptr - (target_ulong)&env->hvf_emul->regs[0]) < sizeof(env->hvf_emul->regs);
}

void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val, int size)
{
    if (is_host_reg(env, ptr)) {
        write_val_to_reg(ptr, val, size);
        return;
    }
    vmx_write_mem(env_cpu(env), ptr, &val, size);
}

uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes)
{
    vmx_read_mem(env_cpu(env), env->hvf_emul->mmio_buf, ptr, bytes);
    return env->hvf_emul->mmio_buf;
}


target_ulong read_val_ext(struct CPUX86State *env, target_ulong ptr, int size)
{
    target_ulong val;
    uint8_t *mmio_ptr;

    if (is_host_reg(env, ptr)) {
        return read_val_from_reg(ptr, size);
    }

    mmio_ptr = read_mmio(env, ptr, size);
    switch (size) {
    case 1:
        val = *(uint8_t *)mmio_ptr;
        break;
    case 2:
        val = *(uint16_t *)mmio_ptr;
        break;
    case 4:
        val = *(uint32_t *)mmio_ptr;
        break;
    case 8:
        val = *(uint64_t *)mmio_ptr;
        break;
    default:
        VM_PANIC("bad size\n");
        break;
    }
    return val;
}

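/*
 * Resolve up to three decoded operands: compute the modrm/offset pointer
 * where needed and, when the corresponding val_opN flag is set, load the
 * operand value itself.  Immediates already carry their value.
 */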
static void fetch_operands(struct CPUX86State *env, struct x86_decode *decode,
                           int n, bool val_op0, bool val_op1, bool val_op2)
{
    int i;
    bool calc_val[3] = {val_op0, val_op1, val_op2};

    for (i = 0; i < n; i++) {
        switch (decode->op[i].type) {
        case X86_VAR_IMMEDIATE:
            break;
        case X86_VAR_REG:
            VM_PANIC_ON(!decode->op[i].ptr);
            if (calc_val[i]) {
                decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
                                                      decode->operand_size);
            }
            break;
        case X86_VAR_RM:
            calc_modrm_operand(env, decode, &decode->op[i]);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        case X86_VAR_OFFSET:
            decode->op[i].ptr = decode_linear_addr(env, decode,
                                                   decode->op[i].ptr,
                                                   R_DS);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        default:
            break;
        }
    }
}

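/*
 * Instruction handlers.  Each exec_* helper emulates one decoded
 * instruction and then advances the guest RIP past it by decode->len.
 */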
static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, false, true, false);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}

static void exec_add(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}

static void exec_or(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}

static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}

static void exec_and(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}

static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
{
    /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/
    int32_t val;
    fetch_operands(env, decode, 2, true, true, false);

    val = 0 - sign(decode->op[1].val, decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);

    if (4 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
    } else if (2 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val);
    } else if (1 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val);
    } else {
        VM_PANIC("bad op size\n");
    }

    /*lflags_to_rflags(env);*/
    RIP(env) += decode->len;
}

static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    RIP(env) += decode->len;
}

static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);

    RIP(env) += decode->len;
}

static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
    RIP(env) += decode->len;
}

static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
    RIP(env) += decode->len;
}

static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 1, true, false, false);

    write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
                  decode->operand_size);
    RIP(env) += decode->len;
}

void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 1, false, false, false);

    if (0xb6 == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }
    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    RIP(env) += decode->len;
}

static void exec_out(struct CPUX86State *env, struct x86_decode *decode)
{
    switch (decode->opcode[0]) {
    case 0xe6:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1);
        break;
    case 0xe7:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    case 0xee:
        hvf_handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1);
        break;
    case 0xef:
        hvf_handle_io(env_cpu(env), DX(env), &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    default:
        VM_PANIC("Bad out opcode\n");
        break;
    }
    RIP(env) += decode->len;
}

static void exec_in(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong val = 0;
    switch (decode->opcode[0]) {
    case 0xe4:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1);
        break;
    case 0xe5:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &val, 0,
                      decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }
        break;
    case 0xec:
        hvf_handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1);
        break;
    case 0xed:
        hvf_handle_io(env_cpu(env), DX(env), &val, 0, decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }

        break;
    default:
        VM_PANIC("Bad in opcode\n");
        break;
    }

    RIP(env) += decode->len;
}

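/*
 * String instruction helpers.  string_increment_reg() steps RSI/RDI by
 * the operand size, downward when EFLAGS.DF is set and upward otherwise.
 * string_rep() runs a single-iteration handler RCX times; for CMPS/SCAS
 * the REP prefix additionally stops once ZF is clear and REPNZ once ZF
 * is set.
 */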
static inline void string_increment_reg(struct CPUX86State *env, int reg,
                                        struct x86_decode *decode)
{
    target_ulong val = read_reg(env, reg, decode->addressing_size);
    if (env->hvf_emul->rflags.df) {
        val -= decode->operand_size;
    } else {
        val += decode->operand_size;
    }
    write_reg(env, reg, val, decode->addressing_size);
}

static inline void string_rep(struct CPUX86State *env, struct x86_decode *decode,
                              void (*func)(struct CPUX86State *env,
                                           struct x86_decode *ins), int rep)
{
    target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size);
    while (rcx--) {
        func(env, decode);
        write_reg(env, R_ECX, rcx, decode->addressing_size);
        if ((PREFIX_REP == rep) && !get_ZF(env)) {
            break;
        }
        if ((PREFIX_REPN == rep) && get_ZF(env)) {
            break;
        }
    }
}

static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr = linear_addr_size(env_cpu(env), RDI(env),
                                         decode->addressing_size, R_ES);

    hvf_handle_io(env_cpu(env), DX(env), env->hvf_emul->mmio_buf, 0,
                  decode->operand_size, 1);
    vmx_write_mem(env_cpu(env), addr, env->hvf_emul->mmio_buf,
                  decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}

static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_ins_single, 0);
    } else {
        exec_ins_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);

    vmx_read_mem(env_cpu(env), env->hvf_emul->mmio_buf, addr,
                 decode->operand_size);
    hvf_handle_io(env_cpu(env), DX(env), env->hvf_emul->mmio_buf, 1,
                  decode->operand_size, 1);

    string_increment_reg(env, R_ESI, decode);
}

static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_outs_single, 0);
    } else {
        exec_outs_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong src_addr;
    target_ulong dst_addr;
    target_ulong val;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(env_cpu(env), RDI(env),
                                decode->addressing_size, R_ES);

    val = read_val_ext(env, src_addr, decode->operand_size);
    write_val_ext(env, dst_addr, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}

static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_movs_single, 0);
    } else {
        exec_movs_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong src_addr;
    target_ulong dst_addr;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(env_cpu(env), RDI(env),
                                decode->addressing_size, R_ES);

    decode->op[0].type = X86_VAR_IMMEDIATE;
    decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);

    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}

static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_cmps_single, decode->rep);
    } else {
        exec_cmps_single(env, decode);
    }
    RIP(env) += decode->len;
}


static void exec_stos_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;
    target_ulong val;

    addr = linear_addr_size(env_cpu(env), RDI(env),
                            decode->addressing_size, R_ES);
    val = read_reg(env, R_EAX, decode->operand_size);
    vmx_write_mem(env_cpu(env), addr, &val, decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}


static void exec_stos(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_stos_single, 0);
    } else {
        exec_stos_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;

    addr = linear_addr_size(env_cpu(env), RDI(env),
                            decode->addressing_size, R_ES);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    vmx_read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size);

    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    string_increment_reg(env, R_EDI, decode);
}

static void exec_scas(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[0].type = X86_VAR_REG;
    decode->op[0].reg = R_EAX;
    if (decode->rep) {
        string_rep(env, decode, exec_scas_single, decode->rep);
    } else {
        exec_scas_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;
    target_ulong val = 0;

    addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    vmx_read_mem(env_cpu(env), &val, addr, decode->operand_size);
    write_reg(env, R_EAX, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
}

static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_lods_single, 0);
    } else {
        exec_lods_single(env, decode);
    }

    RIP(env) += decode->len;
}

#define MSR_IA32_UCODE_REV 0x00000017

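/*
 * Emulate RDMSR for the MSR selected by ECX, returning the 64-bit value
 * split across EDX:EAX.  Unknown MSRs read as zero rather than faulting.
 */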
void simulate_rdmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t msr = ECX(env);
    uint64_t val = 0;

    switch (msr) {
    case MSR_IA32_TSC:
        val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
        break;
    case MSR_IA32_UCODE_REV:
        val = (0x100000000ULL << 32) | 0x100000000ULL;
        break;
    case MSR_EFER:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
        break;
    case MSR_FSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);
        break;
    case MSR_GSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);
        break;
    case MSR_KERNELGSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    default:
        /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
        val = 0;
        break;
    }

    RAX(env) = (uint32_t)val;
    RDX(env) = (uint32_t)(val >> 32);
}

static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_rdmsr(env_cpu(env));
    RIP(env) += decode->len;
}

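/*
 * Emulate WRMSR: write the EDX:EAX pair to the MSR selected by ECX.
 * Writes to unknown MSRs are silently ignored.
 */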
void simulate_wrmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t msr = ECX(env);
    uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);

    switch (msr) {
    case MSR_IA32_TSC:
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
        break;
    case MSR_FSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);
        break;
    case MSR_GSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);
        break;
    case MSR_KERNELGSBASE:
        wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_EFER:
        /*printf("new efer %llx\n", EFER(cpu));*/
        wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);
        if (data & MSR_EFER_NXE) {
            hv_vcpu_invalidate_tlb(cpu->hvf_fd);
        }
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = data;
        break;
    default:
        break;
    }

    /* Related to supporting a known hypervisor interface */
    /* if (g_hypervisor_iface)
         g_hypervisor_iface->wrmsr_handler(cpu, msr, data);

    printf("write msr %llx\n", RCX(cpu));*/
}

static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_wrmsr(env_cpu(env));
    RIP(env) += decode->len;
}

/*
 * flag:
 * 0 - bt, 1 - btc, 2 - bts, 3 - btr
 */
static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag)
{
    int32_t displacement;
    uint8_t index;
    bool cf;
    int mask = (4 == decode->operand_size) ? 0x1f : 0xf;

    VM_PANIC_ON(decode->rex.rex);

    fetch_operands(env, decode, 2, false, true, false);
    index = decode->op[1].val & mask;

    if (decode->op[0].type != X86_VAR_REG) {
        if (4 == decode->operand_size) {
            displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
            decode->op[0].ptr += 4 * displacement;
        } else if (2 == decode->operand_size) {
            displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
            decode->op[0].ptr += 2 * displacement;
        } else {
            VM_PANIC("bt 64bit\n");
        }
    }
    decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
                                     decode->operand_size);
    cf = (decode->op[0].val >> index) & 0x01;

    switch (flag) {
    case 0:
        set_CF(env, cf);
        return;
    case 1:
        decode->op[0].val ^= (1u << index);
        break;
    case 2:
        decode->op[0].val |= (1u << index);
        break;
    case 3:
        decode->op[0].val &= ~(1u << index);
        break;
    }
    write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
                  decode->operand_size);
    set_CF(env, cf);
}

static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 0);
    RIP(env) += decode->len;
}

static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 1);
    RIP(env) += decode->len;
}

static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 3);
    RIP(env) += decode->len;
}

static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 2);
    RIP(env) += decode->len;
}

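/*
 * SHL: the shift count is masked to 5 bits; CF receives the last bit
 * shifted out and OF is computed as CF ^ MSB of the result, following
 * the Bochs flag logic.
 */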
void exec_shl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);

    count = decode->op[1].val;
    count &= 0x1f;      /* count is masked to 5 bits */
    if (!count) {
        goto exit;
    }

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t res = 0;
        if (count <= 8) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (8 - count)) & 0x1;
            of = cf ^ (res >> 7);
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);
        SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res = 0;

        /* from bochs */
        if (count <= 16) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (16 - count)) & 0x1;
            of = cf ^ (res >> 15); /* of = cf ^ result15 */
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);
        SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res = decode->op[0].val << count;

        write_val_ext(env, decode->op[0].ptr, res, 4);
        SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
        cf = (decode->op[0].val >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31); /* of = cf ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    default:
        abort();
    }

exit:
    /* lflags_to_rflags(env); */
    RIP(env) += decode->len;
}

void exec_movsx(CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 2, false, false, false);

    if (0xbe == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }

    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
                             src_op_size);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    RIP(env) += decode->len;
}

void exec_ror(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit6, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
                bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
                SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
            }
        } else {
            count &= 0x7; /* use only bottom 3 bits */
            res = ((uint8_t)decode->op[0].val >> count) |
                  ((uint8_t)decode->op[0].val << (8 - count));
            write_val_ext(env, decode->op[0].ptr, res, 1);
            bit6 = (res >> 6) & 1;
            bit7 = (res >> 7) & 1;
            /* set eflags: ROR count affects the following flags: C, O */
            SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
        }
        break;
    }
    case 2:
    {
        uint32_t bit14, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
                bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;
                /* of = result14 ^ result15 */
                SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
            }
        } else {
            count &= 0x0f; /* use only 4 LSB's */
            res = ((uint16_t)decode->op[0].val >> count) |
                  ((uint16_t)decode->op[0].val << (16 - count));
            write_val_ext(env, decode->op[0].ptr, res, 2);

            bit14 = (res >> 14) & 1;
            bit15 = (res >> 15) & 1;
            /* of = result14 ^ result15 */
            SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
        }
        break;
    }
    case 4:
    {
        uint32_t bit31, bit30;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val >> count) |
                  ((uint32_t)decode->op[0].val << (32 - count));
            write_val_ext(env, decode->op[0].ptr, res, 4);

            bit31 = (res >> 31) & 1;
            bit30 = (res >> 30) & 1;
            /* of = result30 ^ result31 */
            SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
        }
        break;
    }
    }
    RIP(env) += decode->len;
}

void exec_rol(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit0, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit0 = ((uint8_t)decode->op[0].val & 1);
                bit7 = ((uint8_t)decode->op[0].val >> 7);
                SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
            }
        } else {
            count &= 0x7; /* use only lowest 3 bits */
            res = ((uint8_t)decode->op[0].val << count) |
                  ((uint8_t)decode->op[0].val >> (8 - count));

            write_val_ext(env, decode->op[0].ptr, res, 1);
            /* set eflags:
             * ROL count affects the following flags: C, O
             */
            bit0 = (res & 1);
            bit7 = (res >> 7);
            SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
        }
        break;
    }
    case 2:
    {
        uint32_t bit0, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit0 = ((uint16_t)decode->op[0].val & 0x1);
                bit15 = ((uint16_t)decode->op[0].val >> 15);
                /* of = cf ^ result15 */
                SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
            }
        } else {
            count &= 0x0f; /* only use bottom 4 bits */
            res = ((uint16_t)decode->op[0].val << count) |
                  ((uint16_t)decode->op[0].val >> (16 - count));

            write_val_ext(env, decode->op[0].ptr, res, 2);
            bit0 = (res & 0x1);
            bit15 = (res >> 15);
            /* of = cf ^ result15 */
            SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
        }
        break;
    }
    case 4:
    {
        uint32_t bit0, bit31;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val << count) |
                  ((uint32_t)decode->op[0].val >> (32 - count));

            write_val_ext(env, decode->op[0].ptr, res, 4);
            bit0 = (res & 0x1);
            bit31 = (res >> 31);
            /* of = cf ^ result31 */
            SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
        }
        break;
    }
    }
    RIP(env) += decode->len;
}


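/*
 * RCL/RCR rotate through the carry flag, so an N-bit operand effectively
 * rotates over N+1 bits; the count is therefore reduced modulo 9 (byte)
 * or 17 (word) before use.
 */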
void exec_rcl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;
        count %= 9;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_8 << 1) | get_CF(env);
        } else {
            res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
                  (op1_8 >> (9 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (8 - count)) & 0x01;
        of = cf ^ (res >> 7); /* of = cf ^ result7 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res;
        uint16_t op1_16 = decode->op[0].val;

        count %= 17;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_16 << 1) | get_CF(env);
        } else if (count == 16) {
            res = (get_CF(env) << 15) | (op1_16 >> 1);
        } else { /* 2..15 */
            res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
                  (op1_16 >> (17 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (16 - count)) & 0x1;
        of = cf ^ (res >> 15); /* of = cf ^ result15 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 << 1) | get_CF(env);
        } else {
            res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
                  (op1_32 >> (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31); /* of = cf ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    RIP(env) += decode->len;
}

void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;

        count %= 9;
        if (!count) {
            break;
        }
        res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
              (op1_8 << (9 - count));

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (count - 1)) & 0x1;
        of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t op1_16 = decode->op[0].val;
        uint16_t res;

        count %= 17;
        if (!count) {
            break;
        }
        res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
              (op1_16 << (17 - count));

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (count - 1)) & 0x1;
        of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
                                                            result14 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 >> 1) | (get_CF(env) << 31);
        } else {
            res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
                  (op1_32 << (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (count - 1)) & 0x1;
        of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    RIP(env) += decode->len;
}

static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, true, true, false);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}

static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}

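/*
 * Dispatch table mapping decoded commands to their handlers; it is
 * re-indexed by command value into _cmd_handler at init time so that
 * lookup is a direct array access.
 */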
static struct cmd_handler {
    enum x86_decode_cmd cmd;
    void (*handler)(struct CPUX86State *env, struct x86_decode *ins);
} handlers[] = {
    {X86_DECODE_CMD_INVL, NULL,},
    {X86_DECODE_CMD_MOV, exec_mov},
    {X86_DECODE_CMD_ADD, exec_add},
    {X86_DECODE_CMD_OR, exec_or},
    {X86_DECODE_CMD_ADC, exec_adc},
    {X86_DECODE_CMD_SBB, exec_sbb},
    {X86_DECODE_CMD_AND, exec_and},
    {X86_DECODE_CMD_SUB, exec_sub},
    {X86_DECODE_CMD_NEG, exec_neg},
    {X86_DECODE_CMD_XOR, exec_xor},
    {X86_DECODE_CMD_CMP, exec_cmp},
    {X86_DECODE_CMD_INC, exec_inc},
    {X86_DECODE_CMD_DEC, exec_dec},
    {X86_DECODE_CMD_TST, exec_tst},
    {X86_DECODE_CMD_NOT, exec_not},
    {X86_DECODE_CMD_MOVZX, exec_movzx},
    {X86_DECODE_CMD_OUT, exec_out},
    {X86_DECODE_CMD_IN, exec_in},
    {X86_DECODE_CMD_INS, exec_ins},
    {X86_DECODE_CMD_OUTS, exec_outs},
    {X86_DECODE_CMD_RDMSR, exec_rdmsr},
    {X86_DECODE_CMD_WRMSR, exec_wrmsr},
    {X86_DECODE_CMD_BT, exec_bt},
    {X86_DECODE_CMD_BTR, exec_btr},
    {X86_DECODE_CMD_BTC, exec_btc},
    {X86_DECODE_CMD_BTS, exec_bts},
    {X86_DECODE_CMD_SHL, exec_shl},
    {X86_DECODE_CMD_ROL, exec_rol},
    {X86_DECODE_CMD_ROR, exec_ror},
    {X86_DECODE_CMD_RCR, exec_rcr},
    {X86_DECODE_CMD_RCL, exec_rcl},
    /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
    {X86_DECODE_CMD_MOVS, exec_movs},
    {X86_DECODE_CMD_CMPS, exec_cmps},
    {X86_DECODE_CMD_STOS, exec_stos},
    {X86_DECODE_CMD_SCAS, exec_scas},
    {X86_DECODE_CMD_LODS, exec_lods},
    {X86_DECODE_CMD_MOVSX, exec_movsx},
    {X86_DECODE_CMD_XCHG, exec_xchg},
    {X86_DECODE_CMD_XADD, exec_xadd},
};

static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];

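/* Populate the command-indexed dispatch table from the handlers list. */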
static void init_cmd_handler()
{
    int i;
    for (i = 0; i < ARRAY_SIZE(handlers); i++) {
        _cmd_handler[handlers[i].cmd] = handlers[i];
    }
}

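/*
 * Synchronize the emulator register file with the hardware vCPU:
 * load_regs() pulls the GPRs, RFLAGS and RIP out of HVF before
 * emulation; store_regs() pushes the (possibly modified) state back
 * afterwards.
 */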
void load_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    RRX(env, R_EAX) = rreg(cpu->hvf_fd, HV_X86_RAX);
    RRX(env, R_EBX) = rreg(cpu->hvf_fd, HV_X86_RBX);
    RRX(env, R_ECX) = rreg(cpu->hvf_fd, HV_X86_RCX);
    RRX(env, R_EDX) = rreg(cpu->hvf_fd, HV_X86_RDX);
    RRX(env, R_ESI) = rreg(cpu->hvf_fd, HV_X86_RSI);
    RRX(env, R_EDI) = rreg(cpu->hvf_fd, HV_X86_RDI);
    RRX(env, R_ESP) = rreg(cpu->hvf_fd, HV_X86_RSP);
    RRX(env, R_EBP) = rreg(cpu->hvf_fd, HV_X86_RBP);
    for (i = 8; i < 16; i++) {
        RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
    }

    RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
    rflags_to_lflags(env);
    RIP(env) = rreg(cpu->hvf_fd, HV_X86_RIP);
}

void store_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));
    wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));
    wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));
    wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));
    wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));
    wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));
    wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));
    wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));
    for (i = 8; i < 16; i++) {
        wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));
    }

    lflags_to_rflags(env);
    wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(env));
    macvm_set_rip(cpu, RIP(env));
}

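/*
 * Execute one decoded instruction.  Commands without a handler are
 * logged and skipped so the guest can still make progress.
 */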
bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins)
{
    /*if (hvf_vcpu_id(cpu))
    printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), RIP(cpu),
          decode_cmd_to_string(ins->cmd));*/

    if (!_cmd_handler[ins->cmd].handler) {
        printf("Unimplemented handler (%llx) for %d (%x %x) \n", RIP(env),
                ins->cmd, ins->opcode[0],
                ins->opcode_len > 1 ? ins->opcode[1] : 0);
        RIP(env) += ins->len;
        return true;
    }

    _cmd_handler[ins->cmd].handler(env, ins);
    return true;
}

void init_emu()
{
    init_cmd_handler();
}