/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001-2012  The Bochs Project
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86_decode.h"
#include "x86.h"
#include "x86_emu.h"
#include "x86_mmu.h"
#include "x86_flags.h"
#include "vmcs.h"
#include "vmx.h"

void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,
                   int direction, int size, uint32_t count);

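/*
 * The EXEC_2OP_* macros below share the pattern "fetch both operands,
 * apply the operator, optionally write the result back, update rflags".
 * They differ only in the flag helper they expand to: the LOGIC variant
 * passes just the result to FLAGS_FUNC, while the ARITH variant also
 * passes both source values so that carry/overflow can be derived.
 */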
#define EXEC_2OP_LOGIC_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
{                                                                  \
    fetch_operands(env, decode, 2, true, true, false);             \
    switch (decode->operand_size) {                                \
    case 1:                                                        \
    {                                                              \
        uint8_t v1 = (uint8_t)decode->op[0].val;                   \
        uint8_t v2 = (uint8_t)decode->op[1].val;                   \
        uint8_t diff = v1 cmd v2;                                  \
        if (save_res) {                                            \
            write_val_ext(env, decode->op[0].ptr, diff, 1);        \
        }                                                          \
        FLAGS_FUNC##_8(diff);                                      \
        break;                                                     \
    }                                                              \
    case 2:                                                        \
    {                                                              \
        uint16_t v1 = (uint16_t)decode->op[0].val;                 \
        uint16_t v2 = (uint16_t)decode->op[1].val;                 \
        uint16_t diff = v1 cmd v2;                                 \
        if (save_res) {                                            \
            write_val_ext(env, decode->op[0].ptr, diff, 2);        \
        }                                                          \
        FLAGS_FUNC##_16(diff);                                     \
        break;                                                     \
    }                                                              \
    case 4:                                                        \
    {                                                              \
        uint32_t v1 = (uint32_t)decode->op[0].val;                 \
        uint32_t v2 = (uint32_t)decode->op[1].val;                 \
        uint32_t diff = v1 cmd v2;                                 \
        if (save_res) {                                            \
            write_val_ext(env, decode->op[0].ptr, diff, 4);        \
        }                                                          \
        FLAGS_FUNC##_32(diff);                                     \
        break;                                                     \
    }                                                              \
    default:                                                       \
        VM_PANIC("bad size\n");                                    \
    }                                                              \
}

#define EXEC_2OP_ARITH_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
{                                                                  \
    fetch_operands(env, decode, 2, true, true, false);             \
    switch (decode->operand_size) {                                \
    case 1:                                                        \
    {                                                              \
        uint8_t v1 = (uint8_t)decode->op[0].val;                   \
        uint8_t v2 = (uint8_t)decode->op[1].val;                   \
        uint8_t diff = v1 cmd v2;                                  \
        if (save_res) {                                            \
            write_val_ext(env, decode->op[0].ptr, diff, 1);        \
        }                                                          \
        FLAGS_FUNC##_8(v1, v2, diff);                              \
        break;                                                     \
    }                                                              \
    case 2:                                                        \
    {                                                              \
        uint16_t v1 = (uint16_t)decode->op[0].val;                 \
        uint16_t v2 = (uint16_t)decode->op[1].val;                 \
        uint16_t diff = v1 cmd v2;                                 \
        if (save_res) {                                            \
            write_val_ext(env, decode->op[0].ptr, diff, 2);        \
        }                                                          \
        FLAGS_FUNC##_16(v1, v2, diff);                             \
        break;                                                     \
    }                                                              \
    case 4:                                                        \
    {                                                              \
        uint32_t v1 = (uint32_t)decode->op[0].val;                 \
        uint32_t v2 = (uint32_t)decode->op[1].val;                 \
        uint32_t diff = v1 cmd v2;                                 \
        if (save_res) {                                            \
            write_val_ext(env, decode->op[0].ptr, diff, 4);        \
        }                                                          \
        FLAGS_FUNC##_32(v1, v2, diff);                             \
        break;                                                     \
    }                                                              \
    default:                                                       \
        VM_PANIC("bad size\n");                                    \
    }                                                              \
}

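/*
 * Accessors for the emulator's register file in env->hvf_emul->regs[].
 * lx/rx/erx/rrx are the 1/2/4/8-byte views of each entry.
 */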
addr_t read_reg(CPUX86State *env, int reg, int size)
{
    switch (size) {
    case 1:
        return env->hvf_emul->regs[reg].lx;
    case 2:
        return env->hvf_emul->regs[reg].rx;
    case 4:
        return env->hvf_emul->regs[reg].erx;
    case 8:
        return env->hvf_emul->regs[reg].rrx;
    default:
        abort();
    }
    return 0;
}

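/*
 * As on real x86, a 4-byte write stores into the full 64-bit slot and
 * zero-extends: the upper half of the register is cleared.
 */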
void write_reg(CPUX86State *env, int reg, addr_t val, int size)
{
    switch (size) {
    case 1:
        env->hvf_emul->regs[reg].lx = val;
        break;
    case 2:
        env->hvf_emul->regs[reg].rx = val;
        break;
    case 4:
        env->hvf_emul->regs[reg].rrx = (uint32_t)val;
        break;
    case 8:
        env->hvf_emul->regs[reg].rrx = val;
        break;
    default:
        abort();
    }
}

addr_t read_val_from_reg(addr_t reg_ptr, int size)
{
    addr_t val;

    switch (size) {
    case 1:
        val = *(uint8_t *)reg_ptr;
        break;
    case 2:
        val = *(uint16_t *)reg_ptr;
        break;
    case 4:
        val = *(uint32_t *)reg_ptr;
        break;
    case 8:
        val = *(uint64_t *)reg_ptr;
        break;
    default:
        abort();
    }
    return val;
}

void write_val_to_reg(addr_t reg_ptr, addr_t val, int size)
{
    switch (size) {
    case 1:
        *(uint8_t *)reg_ptr = val;
        break;
    case 2:
        *(uint16_t *)reg_ptr = val;
        break;
    case 4:
        *(uint64_t *)reg_ptr = (uint32_t)val;
        break;
    case 8:
        *(uint64_t *)reg_ptr = val;
        break;
    default:
        abort();
    }
}

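/*
 * Operand "pointers" handed out by the decoder are either host pointers
 * into the emulated register file or guest linear addresses.
 * is_host_reg() tells the two apart by checking whether the pointer
 * falls inside env->hvf_emul->regs[], letting read/write_val_ext()
 * choose between direct access and vmx_read/write_mem().
 */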
static bool is_host_reg(struct CPUX86State *env, addr_t ptr)
{
    return (ptr - (addr_t)&env->hvf_emul->regs[0]) < sizeof(env->hvf_emul->regs);
}

void write_val_ext(struct CPUX86State *env, addr_t ptr, addr_t val, int size)
{
    if (is_host_reg(env, ptr)) {
        write_val_to_reg(ptr, val, size);
        return;
    }
    vmx_write_mem(ENV_GET_CPU(env), ptr, &val, size);
}

uint8_t *read_mmio(struct CPUX86State *env, addr_t ptr, int bytes)
{
    vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, ptr, bytes);
    return env->hvf_emul->mmio_buf;
}

addr_t read_val_ext(struct CPUX86State *env, addr_t ptr, int size)
{
    addr_t val;
    uint8_t *mmio_ptr;

    if (is_host_reg(env, ptr)) {
        return read_val_from_reg(ptr, size);
    }

    mmio_ptr = read_mmio(env, ptr, size);
    switch (size) {
    case 1:
        val = *(uint8_t *)mmio_ptr;
        break;
    case 2:
        val = *(uint16_t *)mmio_ptr;
        break;
    case 4:
        val = *(uint32_t *)mmio_ptr;
        break;
    case 8:
        val = *(uint64_t *)mmio_ptr;
        break;
    default:
        VM_PANIC("bad size\n");
        break;
    }
    return val;
}

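/*
 * Resolve up to three decoded operands: compute the address of
 * modrm/offset operands and, where the caller requested it (val_opN),
 * fetch the operand value as well.  Immediates already carry their
 * value from the decode stage.
 */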
static void fetch_operands(struct CPUX86State *env, struct x86_decode *decode,
                           int n, bool val_op0, bool val_op1, bool val_op2)
{
    int i;
    bool calc_val[3] = {val_op0, val_op1, val_op2};

    for (i = 0; i < n; i++) {
        switch (decode->op[i].type) {
        case X86_VAR_IMMEDIATE:
            break;
        case X86_VAR_REG:
            VM_PANIC_ON(!decode->op[i].ptr);
            if (calc_val[i]) {
                decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
                                                      decode->operand_size);
            }
            break;
        case X86_VAR_RM:
            calc_modrm_operand(env, decode, &decode->op[i]);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        case X86_VAR_OFFSET:
            decode->op[i].ptr = decode_linear_addr(env, decode,
                                                   decode->op[i].ptr,
                                                   R_DS);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        default:
            break;
        }
    }
}

static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, false, true, false);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}

static void exec_add(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}

static void exec_or(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
    RIP(env) += decode->len;
}

static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}

static void exec_and(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
    RIP(env) += decode->len;
}

static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
    RIP(env) += decode->len;
}

static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
{
    /*EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/
    int32_t val;
    fetch_operands(env, decode, 2, true, true, false);

    val = 0 - sign(decode->op[1].val, decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);

    if (4 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB_32(0, 0 - val, val);
    } else if (2 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB_16(0, 0 - val, val);
    } else if (1 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB_8(0, 0 - val, val);
    } else {
        VM_PANIC("bad op size\n");
    }

    /*lflags_to_rflags(env);*/
    RIP(env) += decode->len;
}

static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    RIP(env) += decode->len;
}

static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_ARITH_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);

    RIP(env) += decode->len;
}

static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_ARITH_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
    RIP(env) += decode->len;
}

static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_LOGIC_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
    RIP(env) += decode->len;
}

static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 1, true, false, false);

    write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
                  decode->operand_size);
    RIP(env) += decode->len;
}

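/*
 * MOVZX: zero-extend a smaller source into the destination.  Opcode
 * 0f b6 takes a byte source, 0f b7 a word source; operand_size is
 * temporarily overridden while the modrm source operand is resolved.
 */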
void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 1, false, false, false);

    if (0xb6 == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }
    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    RIP(env) += decode->len;
}

static void exec_out(struct CPUX86State *env, struct x86_decode *decode)
{
    switch (decode->opcode[0]) {
    case 0xe6:
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 1, 1, 1);
        break;
    case 0xe7:
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    case 0xee:
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 1, 1, 1);
        break;
    case 0xef:
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    default:
        VM_PANIC("Bad out opcode\n");
        break;
    }
    RIP(env) += decode->len;
}

static void exec_in(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t val = 0;
    switch (decode->opcode[0]) {
    case 0xe4:
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 0, 1, 1);
        break;
    case 0xe5:
        hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &val, 0,
                      decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }
        break;
    case 0xec:
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 0, 1, 1);
        break;
    case 0xed:
        hvf_handle_io(ENV_GET_CPU(env), DX(env), &val, 0,
                      decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }
        break;
    default:
        VM_PANIC("Bad in opcode\n");
        break;
    }

    RIP(env) += decode->len;
}

static inline void string_increment_reg(struct CPUX86State *env, int reg,
                                        struct x86_decode *decode)
{
    addr_t val = read_reg(env, reg, decode->addressing_size);
    if (env->hvf_emul->rflags.df) {
        val -= decode->operand_size;
    } else {
        val += decode->operand_size;
    }
    write_reg(env, reg, val, decode->addressing_size);
}

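/*
 * Run one string-instruction step per iteration under a REP prefix:
 * RCX counts down, and the PREFIX_REP/PREFIX_REPN cases additionally
 * test ZF after every step, as REPE/REPNE require for CMPS and SCAS.
 */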
static inline void string_rep(struct CPUX86State *env,
                              struct x86_decode *decode,
                              void (*func)(struct CPUX86State *env,
                                           struct x86_decode *ins), int rep)
{
    addr_t rcx = read_reg(env, R_ECX, decode->addressing_size);
    while (rcx--) {
        func(env, decode);
        write_reg(env, R_ECX, rcx, decode->addressing_size);
        if ((PREFIX_REP == rep) && !get_ZF(env)) {
            break;
        }
        if ((PREFIX_REPN == rep) && get_ZF(env)) {
            break;
        }
    }
}

static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                                   decode->addressing_size, R_ES);

    hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 0,
                  decode->operand_size, 1);
    vmx_write_mem(ENV_GET_CPU(env), addr, env->hvf_emul->mmio_buf,
                  decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}

static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_ins_single, 0);
    } else {
        exec_ins_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t addr = decode_linear_addr(env, decode, RSI(env), R_DS);

    vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, addr,
                 decode->operand_size);
    hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 1,
                  decode->operand_size, 1);

    string_increment_reg(env, R_ESI, decode);
}

static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_outs_single, 0);
    } else {
        exec_outs_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t src_addr;
    addr_t dst_addr;
    addr_t val;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                                decode->addressing_size, R_ES);

    val = read_val_ext(env, src_addr, decode->operand_size);
    write_val_ext(env, dst_addr, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}

static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_movs_single, 0);
    } else {
        exec_movs_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t src_addr;
    addr_t dst_addr;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                                decode->addressing_size, R_ES);

    decode->op[0].type = X86_VAR_IMMEDIATE;
    decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);

    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}

static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_cmps_single, decode->rep);
    } else {
        exec_cmps_single(env, decode);
    }
    RIP(env) += decode->len;
}

static void exec_stos_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t addr;
    addr_t val;

    addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                            decode->addressing_size, R_ES);
    val = read_reg(env, R_EAX, decode->operand_size);
    vmx_write_mem(ENV_GET_CPU(env), addr, &val, decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}

static void exec_stos(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_stos_single, 0);
    } else {
        exec_stos_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t addr;

    addr = linear_addr_size(ENV_GET_CPU(env), RDI(env),
                            decode->addressing_size, R_ES);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    vmx_read_mem(ENV_GET_CPU(env), &decode->op[1].val, addr,
                 decode->operand_size);

    EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    string_increment_reg(env, R_EDI, decode);
}

static void exec_scas(struct CPUX86State *env, struct x86_decode *decode)
{
    decode->op[0].type = X86_VAR_REG;
    decode->op[0].reg = R_EAX;
    if (decode->rep) {
        string_rep(env, decode, exec_scas_single, decode->rep);
    } else {
        exec_scas_single(env, decode);
    }

    RIP(env) += decode->len;
}

static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode)
{
    addr_t addr;
    addr_t val = 0;

    addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    vmx_read_mem(ENV_GET_CPU(env), &val, addr, decode->operand_size);
    write_reg(env, R_EAX, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
}

static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_lods_single, 0);
    } else {
        exec_lods_single(env, decode);
    }

    RIP(env) += decode->len;
}

#define MSR_IA32_UCODE_REV 0x00000017

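/*
 * RDMSR simulation: most values are read straight from the VMCS
 * (EFER, FS/GS base) or from QEMU's MTRR state, and the result is
 * returned to the guest in EDX:EAX.
 */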
void simulate_rdmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t msr = ECX(env);
    uint64_t val = 0;

    switch (msr) {
    case MSR_IA32_TSC:
        val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
        break;
    case MSR_IA32_UCODE_REV:
        val = (0x100000000ULL << 32) | 0x100000000ULL;
        break;
    case MSR_EFER:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
        break;
    case MSR_FSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);
        break;
    case MSR_GSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);
        break;
    case MSR_KERNELGSBASE:
        val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    default:
        /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
        val = 0;
        break;
    }

    RAX(env) = (uint32_t)val;
    RDX(env) = (uint32_t)(val >> 32);
}

static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_rdmsr(ENV_GET_CPU(env));
    RIP(env) += decode->len;
}

void simulate_wrmsr(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t msr = ECX(env);
    uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);

    switch (msr) {
    case MSR_IA32_TSC:
        /* if (!osx_is_sierra())
               wvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET, data - rdtscp());
           hv_vm_sync_tsc(data);*/
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
        break;
    case MSR_FSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);
        break;
    case MSR_GSBASE:
        wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);
        break;
    case MSR_KERNELGSBASE:
        wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_EFER:
        /*printf("new efer %llx\n", EFER(cpu));*/
        wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);
        if (data & MSR_EFER_NXE) {
            hv_vcpu_invalidate_tlb(cpu->hvf_fd);
        }
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = data;
        break;
    default:
        break;
    }

    /* Related to support of a known hypervisor interface */
    /* if (g_hypervisor_iface)
           g_hypervisor_iface->wrmsr_handler(cpu, msr, data);

       printf("write msr %llx\n", RCX(cpu));*/
}

static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)
{
    simulate_wrmsr(ENV_GET_CPU(env));
    RIP(env) += decode->len;
}

/*
 * flag:
 * 0 - bt, 1 - btc, 2 - bts, 3 - btr
 */
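/*
 * For a memory operand the bit string is addressed relative to the
 * decoded address: the signed upper bits of the bit offset select the
 * word/dword that contains the bit, and only the remaining low 4/5
 * bits are used as the index that is tested or modified.
 */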
static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag)
{
    int32_t displacement;
    uint8_t index;
    bool cf;
    int mask = (4 == decode->operand_size) ? 0x1f : 0xf;

    VM_PANIC_ON(decode->rex.rex);

    fetch_operands(env, decode, 2, false, true, false);
    index = decode->op[1].val & mask;

    if (decode->op[0].type != X86_VAR_REG) {
        if (4 == decode->operand_size) {
            displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
            decode->op[0].ptr += 4 * displacement;
        } else if (2 == decode->operand_size) {
            displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
            decode->op[0].ptr += 2 * displacement;
        } else {
            VM_PANIC("bt 64bit\n");
        }
    }
    decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
                                     decode->operand_size);
    cf = (decode->op[0].val >> index) & 0x01;

    switch (flag) {
    case 0:
        set_CF(env, cf);
        return;
    case 1:
        decode->op[0].val ^= (1u << index);
        break;
    case 2:
        decode->op[0].val |= (1u << index);
        break;
    case 3:
        decode->op[0].val &= ~(1u << index);
        break;
    }
    write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
                  decode->operand_size);
    set_CF(env, cf);
}

static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 0);
    RIP(env) += decode->len;
}

static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 1);
    RIP(env) += decode->len;
}

static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 3);
    RIP(env) += decode->len;
}

static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 2);
    RIP(env) += decode->len;
}

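/*
 * SHL: the count is masked to 5 bits as on hardware; a zero count
 * leaves operand and flags untouched, otherwise OF becomes CF xor the
 * new sign bit.
 */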
void exec_shl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);

    count = decode->op[1].val;
    count &= 0x1f;      /* count is masked to 5 bits */
    if (!count) {
        goto exit;
    }

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t res = 0;
        if (count <= 8) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (8 - count)) & 0x1;
            of = cf ^ (res >> 7);
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);
        SET_FLAGS_OSZAPC_LOGIC_8(res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res = 0;

        /* from bochs */
        if (count <= 16) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (16 - count)) & 0x1;
            of = cf ^ (res >> 15); /* of = cf ^ result15 */
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);
        SET_FLAGS_OSZAPC_LOGIC_16(res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res = decode->op[0].val << count;

        write_val_ext(env, decode->op[0].ptr, res, 4);
        SET_FLAGS_OSZAPC_LOGIC_32(res);
        cf = (decode->op[0].val >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31); /* of = cf ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    default:
        abort();
    }

exit:
    /* lflags_to_rflags(env); */
    RIP(env) += decode->len;
}

void exec_movsx(CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 2, false, false, false);

    if (0xbe == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }

    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
                             src_op_size);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    RIP(env) += decode->len;
}

void exec_ror(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit6, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
                bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
                SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
            }
        } else {
            count &= 0x7; /* use only bottom 3 bits */
            res = ((uint8_t)decode->op[0].val >> count) |
                   ((uint8_t)decode->op[0].val << (8 - count));
            write_val_ext(env, decode->op[0].ptr, res, 1);
            bit6 = (res >> 6) & 1;
            bit7 = (res >> 7) & 1;
            /* set eflags: ROR count affects the following flags: C, O */
            SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
        }
        break;
    }
    case 2:
    {
        uint32_t bit14, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
                bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;
                /* of = result14 ^ result15 */
                SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
            }
        } else {
            count &= 0x0f; /* use only 4 LSB's */
            res = ((uint16_t)decode->op[0].val >> count) |
                   ((uint16_t)decode->op[0].val << (16 - count));
            write_val_ext(env, decode->op[0].ptr, res, 2);

            bit14 = (res >> 14) & 1;
            bit15 = (res >> 15) & 1;
            /* of = result14 ^ result15 */
            SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
        }
        break;
    }
    case 4:
    {
        uint32_t bit31, bit30;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val >> count) |
                   ((uint32_t)decode->op[0].val << (32 - count));
            write_val_ext(env, decode->op[0].ptr, res, 4);

            bit31 = (res >> 31) & 1;
            bit30 = (res >> 30) & 1;
            /* of = result30 ^ result31 */
            SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
        }
        break;
    }
    }
    RIP(env) += decode->len;
}

void exec_rol(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit0, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit0 = ((uint8_t)decode->op[0].val & 1);
                bit7 = ((uint8_t)decode->op[0].val >> 7);
                SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
            }
        } else {
            count &= 0x7; /* use only lowest 3 bits */
            res = ((uint8_t)decode->op[0].val << count) |
                   ((uint8_t)decode->op[0].val >> (8 - count));

            write_val_ext(env, decode->op[0].ptr, res, 1);
            /* set eflags:
             * ROL count affects the following flags: C, O
             */
            bit0 = (res & 1);
            bit7 = (res >> 7);
            SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
        }
        break;
    }
    case 2:
    {
        uint32_t bit0, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit0 = ((uint16_t)decode->op[0].val & 0x1);
                bit15 = ((uint16_t)decode->op[0].val >> 15);
                /* of = cf ^ result15 */
                SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
            }
        } else {
            count &= 0x0f; /* only use bottom 4 bits */
            res = ((uint16_t)decode->op[0].val << count) |
                   ((uint16_t)decode->op[0].val >> (16 - count));

            write_val_ext(env, decode->op[0].ptr, res, 2);
            bit0 = (res & 0x1);
            bit15 = (res >> 15);
            /* of = cf ^ result15 */
            SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
        }
        break;
    }
    case 4:
    {
        uint32_t bit0, bit31;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val << count) |
                   ((uint32_t)decode->op[0].val >> (32 - count));

            write_val_ext(env, decode->op[0].ptr, res, 4);
            bit0 = (res & 0x1);
            bit31 = (res >> 31);
            /* of = cf ^ result31 */
            SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
        }
        break;
    }
    }
    RIP(env) += decode->len;
}

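/*
 * RCL/RCR rotate through the carry flag, making the effective rotate
 * width 9 bits for byte and 17 bits for word operands; the count is
 * reduced modulo that width before rotating.
 */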
void exec_rcl(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;
        count %= 9;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_8 << 1) | get_CF(env);
        } else {
            res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
                  (op1_8 >> (9 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (8 - count)) & 0x01;
        of = cf ^ (res >> 7); /* of = cf ^ result7 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res;
        uint16_t op1_16 = decode->op[0].val;

        count %= 17;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_16 << 1) | get_CF(env);
        } else if (count == 16) {
            res = (get_CF(env) << 15) | (op1_16 >> 1);
        } else { /* 2..15 */
            res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
                  (op1_16 >> (17 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (16 - count)) & 0x1;
        of = cf ^ (res >> 15); /* of = cf ^ result15 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 << 1) | get_CF(env);
        } else {
            res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
                  (op1_32 >> (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31); /* of = cf ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    RIP(env) += decode->len;
}

void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;

        count %= 9;
        if (!count) {
            break;
        }
        res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
              (op1_8 << (9 - count));

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (count - 1)) & 0x1;
        of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t op1_16 = decode->op[0].val;
        uint16_t res;

        count %= 17;
        if (!count) {
            break;
        }
        res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
              (op1_16 << (17 - count));

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (count - 1)) & 0x1;
        /* of = result15 ^ result14 */
        of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1;
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 >> 1) | (get_CF(env) << 31);
        } else {
            res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
                  (op1_32 << (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (count - 1)) & 0x1;
        of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    RIP(env) += decode->len;
}

static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, true, true, false);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}

static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_ARITH_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    RIP(env) += decode->len;
}

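/*
 * Dispatch table: one handler per decoded command.  The sparse
 * initializer list is copied into _cmd_handler[], indexed by
 * enum x86_decode_cmd, at init time.
 */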
static struct cmd_handler {
    enum x86_decode_cmd cmd;
    void (*handler)(struct CPUX86State *env, struct x86_decode *ins);
} handlers[] = {
    {X86_DECODE_CMD_INVL, NULL},
    {X86_DECODE_CMD_MOV, exec_mov},
    {X86_DECODE_CMD_ADD, exec_add},
    {X86_DECODE_CMD_OR, exec_or},
    {X86_DECODE_CMD_ADC, exec_adc},
    {X86_DECODE_CMD_SBB, exec_sbb},
    {X86_DECODE_CMD_AND, exec_and},
    {X86_DECODE_CMD_SUB, exec_sub},
    {X86_DECODE_CMD_NEG, exec_neg},
    {X86_DECODE_CMD_XOR, exec_xor},
    {X86_DECODE_CMD_CMP, exec_cmp},
    {X86_DECODE_CMD_INC, exec_inc},
    {X86_DECODE_CMD_DEC, exec_dec},
    {X86_DECODE_CMD_TST, exec_tst},
    {X86_DECODE_CMD_NOT, exec_not},
    {X86_DECODE_CMD_MOVZX, exec_movzx},
    {X86_DECODE_CMD_OUT, exec_out},
    {X86_DECODE_CMD_IN, exec_in},
    {X86_DECODE_CMD_INS, exec_ins},
    {X86_DECODE_CMD_OUTS, exec_outs},
    {X86_DECODE_CMD_RDMSR, exec_rdmsr},
    {X86_DECODE_CMD_WRMSR, exec_wrmsr},
    {X86_DECODE_CMD_BT, exec_bt},
    {X86_DECODE_CMD_BTR, exec_btr},
    {X86_DECODE_CMD_BTC, exec_btc},
    {X86_DECODE_CMD_BTS, exec_bts},
    {X86_DECODE_CMD_SHL, exec_shl},
    {X86_DECODE_CMD_ROL, exec_rol},
    {X86_DECODE_CMD_ROR, exec_ror},
    {X86_DECODE_CMD_RCR, exec_rcr},
    {X86_DECODE_CMD_RCL, exec_rcl},
    /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
    {X86_DECODE_CMD_MOVS, exec_movs},
    {X86_DECODE_CMD_CMPS, exec_cmps},
    {X86_DECODE_CMD_STOS, exec_stos},
    {X86_DECODE_CMD_SCAS, exec_scas},
    {X86_DECODE_CMD_LODS, exec_lods},
    {X86_DECODE_CMD_MOVSX, exec_movsx},
    {X86_DECODE_CMD_XCHG, exec_xchg},
    {X86_DECODE_CMD_XADD, exec_xadd},
};

static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];

static void init_cmd_handler(void)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(handlers); i++) {
        _cmd_handler[handlers[i].cmd] = handlers[i];
    }
}

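/*
 * Sync guest state between the HVF vCPU and CPUX86State around
 * emulation: load_regs() pulls the GPRs, rflags and rip out of the
 * vCPU before emulating, store_regs() writes them back afterwards.
 */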
void load_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    RRX(env, R_EAX) = rreg(cpu->hvf_fd, HV_X86_RAX);
    RRX(env, R_EBX) = rreg(cpu->hvf_fd, HV_X86_RBX);
    RRX(env, R_ECX) = rreg(cpu->hvf_fd, HV_X86_RCX);
    RRX(env, R_EDX) = rreg(cpu->hvf_fd, HV_X86_RDX);
    RRX(env, R_ESI) = rreg(cpu->hvf_fd, HV_X86_RSI);
    RRX(env, R_EDI) = rreg(cpu->hvf_fd, HV_X86_RDI);
    RRX(env, R_ESP) = rreg(cpu->hvf_fd, HV_X86_RSP);
    RRX(env, R_EBP) = rreg(cpu->hvf_fd, HV_X86_RBP);
    for (i = 8; i < 16; i++) {
        RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
    }

    RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
    rflags_to_lflags(env);
    RIP(env) = rreg(cpu->hvf_fd, HV_X86_RIP);
}

void store_regs(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    int i = 0;
    wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));
    wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));
    wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));
    wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));
    wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));
    wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));
    wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));
    wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));
    for (i = 8; i < 16; i++) {
        wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));
    }

    lflags_to_rflags(env);
    wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(env));
    macvm_set_rip(cpu, RIP(env));
}

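/*
 * Execute one decoded instruction.  Commands without a handler are
 * currently skipped with a diagnostic (rip is still advanced) rather
 * than stopping the VM.
 */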
bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins)
{
    /*if (hvf_vcpu_id(cpu))
    printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), RIP(cpu),
          decode_cmd_to_string(ins->cmd));*/

    if (!_cmd_handler[ins->cmd].handler) {
        printf("Unimplemented handler (%llx) for %d (%x %x)\n", RIP(env),
               ins->cmd, ins->opcode[0],
               ins->opcode_len > 1 ? ins->opcode[1] : 0);
        RIP(env) += ins->len;
        return true;
    }

    _cmd_handler[ins->cmd].handler(env, ins);
    return true;
}

void init_emu(void)
{
    init_cmd_handler();
}