/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%eax",
    "%ecx",
    "%edx",
    "%ebx",
    "%esp",
    "%ebp",
    "%esi",
    "%edi",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_EAX,
    TCG_REG_EDX,
    TCG_REG_ECX,
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
};

static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };

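/* Address of the TB epilogue: set by tcg_target_qemu_prologue() and used by
   INDEX_op_exit_tb to jump back to the code that called the generated TB. */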
static uint8_t *tb_ret_addr;

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch(type) {
    case R_386_32:
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC32:
        *(uint32_t *)code_ptr = value - (long)code_ptr;
        break;
    default:
        tcg_abort();
    }
}

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    flags &= TCG_CALL_TYPE_MASK;
    switch(flags) {
    case TCG_CALL_TYPE_STD:
        return 0;
    case TCG_CALL_TYPE_REGPARM_1:
    case TCG_CALL_TYPE_REGPARM_2:
    case TCG_CALL_TYPE_REGPARM:
        return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else
        return 0;
}

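/* Group-1 ALU operation numbers.  Each value is used both as the /r field of
   the 0x81/0x83 immediate forms and, shifted left by 3, to build the
   register-register opcode (e.g. 0x01 | (ARITH_SUB << 3) == 0x29, "sub"). */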
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

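/* /r field values for the shift-group opcodes 0xc1, 0xd1 and 0xd3. */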
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

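/* Condition-code values, added to the 0x70 (short Jcc) or 0x0f 0x80 (near
   Jcc) opcode bases by tcg_out_jxx(); JCC_JMP selects an unconditional jump. */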
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

#define P_EXT 0x100 /* 0x0f opcode prefix */

static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};

static inline void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_EXT)
        tcg_out8(s, 0x0f);
    tcg_out8(s, opc);
}

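/* Emit an opcode followed by a ModRM byte in register-direct form
   (mod = 11): "reg" field = r, "r/m" field = rm. */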
static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc);
    tcg_out8(s, 0xc0 | (r << 3) | rm);
}

/* rm == -1 means no register index */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
                                        int32_t offset)
{
    tcg_out_opc(s, opc);
    if (rm == -1) {
        tcg_out8(s, 0x05 | (r << 3));
        tcg_out32(s, offset);
    } else if (offset == 0 && rm != TCG_REG_EBP) {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x04 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x00 | (r << 3) | rm);
        }
    } else if ((int8_t)offset == offset) {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x44 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x40 | (r << 3) | rm);
        }
        tcg_out8(s, offset);
    } else {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x84 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x80 | (r << 3) | rm);
        }
        tcg_out32(s, offset);
    }
}

static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    if (arg != ret)
        tcg_out_modrm(s, 0x8b, ret, arg);
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, int32_t arg)
{
    if (arg == 0) {
        /* xor r0,r0 */
        tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret);
    } else {
        tcg_out8(s, 0xb8 + ret);
        tcg_out32(s, arg);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    /* movl */
    tcg_out_modrm_offset(s, 0x8b, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    /* movl */
    tcg_out_modrm_offset(s, 0x89, arg, arg1, arg2);
}

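/* Emit "op $val, r0": the sign-extended 8-bit immediate form (opcode 0x83)
   when val fits in an int8_t, otherwise the 32-bit immediate form (0x81). */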
static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val)
{
    if (val == (int8_t)val) {
        tcg_out_modrm(s, 0x83, c, r0);
        tcg_out8(s, val);
    } else {
        tcg_out_modrm(s, 0x81, c, r0);
        tcg_out32(s, val);
    }
}

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0)
        tgen_arithi(s, ARITH_ADD, reg, val);
}

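/* Emit a (possibly conditional) jump to a TCG label.  If the label already
   has a value and the displacement fits in 8 bits, the short rel8 form is
   used; otherwise the rel32 form is emitted, with a relocation when the
   label is still unresolved. */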
static void tcg_out_jxx(TCGContext *s, int opc, int label_index)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1)
                tcg_out8(s, 0xeb);
            else
                tcg_out8(s, 0x70 + opc);
            tcg_out8(s, val1);
        } else {
            if (opc == -1) {
                tcg_out8(s, 0xe9);
                tcg_out32(s, val - 5);
            } else {
                tcg_out8(s, 0x0f);
                tcg_out8(s, 0x80 + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else {
        if (opc == -1) {
            tcg_out8(s, 0xe9);
        } else {
            tcg_out8(s, 0x0f);
            tcg_out8(s, 0x80 + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}

static void tcg_out_brcond(TCGContext *s, int cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, 0x85, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP, arg1, arg2);
        }
    } else {
        tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3), arg2, arg1);
    }
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
}

/* XXX: we implement it at the target level to avoid having to
   handle cross-basic-block temporaries */
static void tcg_out_brcond2(TCGContext *s,
                            const TCGArg *args, const int *const_args)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], label_next);
        tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3], args[5]);
        break;
    case TCG_COND_NE:
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], args[5]);
        break;
    case TCG_COND_LT:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GT:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}

#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX.  It will be useful once fixed-register globals are less
   common. */
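/* Emit code for a guest load.  With CONFIG_SOFTMMU this generates an inline
   TLB lookup: the guest address is used to index tlb_table[mem_index]
   (reached through %ebp, which holds the env pointer), compared against the
   entry's addr_read field and, on a hit, turned into a host address by
   adding the entry's addend; on a miss the code calls the matching
   __ld*_mmu helper.  Without softmmu the guest address is used directly. */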
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);

    tcg_out_mov(s, r0, addr_reg);

    tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, 0x70 + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* XXX: move that code to the end of the TB */
#if TARGET_LONG_BITS == 32
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
#else
    tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
#endif
    tcg_out8(s, 0xe8);
    tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
              (tcg_target_long)s->code_ptr - 4);

    switch(opc) {
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm(s, 0xbe | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm(s, 0xbf | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 0:
        /* movzbl */
        tcg_out_modrm(s, 0xb6 | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm(s, 0xb7 | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_EAX);
        break;
    case 3:
        if (data_reg == TCG_REG_EDX) {
            tcg_out_opc(s, 0x90 + TCG_REG_EDX); /* xchg %edx, %eax */
            tcg_out_mov(s, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, data_reg2, TCG_REG_EDX);
        }
        break;
    }

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
        break;
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, 0);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);
        }
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);

            /* movswl data_reg, data_reg */
            tcg_out_modrm(s, 0xbf | P_EXT, data_reg, data_reg);
        }
        break;
    case 2:
        /* movl (r0), data_reg */
        tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
        if (bswap) {
            /* bswap */
            tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);
        }
        break;
    case 3:
        /* XXX: could be nicer */
        if (r0 == data_reg) {
            r1 = TCG_REG_EDX;
            if (r1 == data_reg)
                r1 = TCG_REG_EAX;
            tcg_out_mov(s, r1, r0);
            r0 = r1;
        }
        if (!bswap) {
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 4);
        } else {
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 4);
            tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);

            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 0);
            /* bswap */
            tcg_out_opc(s, (0xc8 + data_reg2) | P_EXT);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}

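/* Emit code for a guest store.  Mirrors tcg_out_qemu_ld: with CONFIG_SOFTMMU
   an inline TLB lookup against the entry's addr_write field selects between
   the inline store (TLB hit) and a call to the matching __st*_mmu helper
   (TLB miss); without softmmu the guest address is used directly. */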
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;

    s_bits = opc;

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);

    tcg_out_mov(s, r0, addr_reg);

    tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* jne label3 */
    tcg_out8(s, 0x70 + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* XXX: move that code to the end of the TB */
#if TARGET_LONG_BITS == 32
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, data_reg);
        tcg_out_mov(s, TCG_REG_ECX, data_reg2);
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    } else {
        switch(opc) {
        case 0:
            /* movzbl */
            tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_EDX, data_reg);
            break;
        case 1:
            /* movzwl */
            tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_EDX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_EDX, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
    }
#else
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out_opc(s, 0x50 + data_reg2); /* push */
        tcg_out_opc(s, 0x50 + data_reg); /* push */
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 12);
    } else {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        switch(opc) {
        case 0:
            /* movzbl */
            tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_ECX, data_reg);
            break;
        case 1:
            /* movzwl */
            tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_ECX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_ECX, data_reg);
            break;
        }
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    }
#endif

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    switch(opc) {
    case 0:
        /* movb */
        tcg_out_modrm_offset(s, 0x88, data_reg, r0, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out8(s, 0x66); /* rolw $8, %ecx */
            tcg_out_modrm(s, 0xc1, 0, r1);
            tcg_out8(s, 8);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            data_reg = r1;
        }
        /* movl */
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 3:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg2);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            tcg_out_modrm_offset(s, 0x89, r1, r0, 0);
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            tcg_out_modrm_offset(s, 0x89, r1, r0, 4);
        } else {
            tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
            tcg_out_modrm_offset(s, 0x89, data_reg2, r0, 4);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}

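/* Emit host code for one TCG opcode; args/const_args follow the operand
   layout declared in x86_op_defs below. */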
static inline void tcg_out_op(TCGContext *s, int opc,
                              const TCGArg *args, const int *const_args)
{
    int c;

    switch(opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
        tcg_out8(s, 0xe9); /* jmp tb_ret_addr */
        tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, 0xe9); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            /* jmp Ev */
            tcg_out_modrm_offset(s, 0xff, 4, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out8(s, 0xe8);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            tcg_out_modrm(s, 0xff, 2, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            tcg_out8(s, 0xe9);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            tcg_out_modrm(s, 0xff, 4, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0]);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ld8u_i32:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        /* movsbl */
        tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        /* movswl */
        tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        /* movl */
        tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        /* movb */
        tcg_out_modrm_offset(s, 0x88, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        /* movl */
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
    gen_arith:
        if (const_args[2]) {
            tgen_arithi(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
        }
        break;
    case INDEX_op_mul_i32:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, 0x6b, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, 0x69, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
        }
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_modrm(s, 0xf7, 4, args[3]);
        break;
    case INDEX_op_div2_i32:
        tcg_out_modrm(s, 0xf7, 7, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_modrm(s, 0xf7, 6, args[4]);
        break;
    case INDEX_op_shl_i32:
        c = SHIFT_SHL;
    gen_shift32:
        if (const_args[2]) {
            if (args[2] == 1) {
                tcg_out_modrm(s, 0xd1, c, args[0]);
            } else {
                tcg_out_modrm(s, 0xc1, c, args[0]);
                tcg_out8(s, args[2]);
            }
        } else {
            tcg_out_modrm(s, 0xd3, c, args[0]);
        }
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SHR;
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SAR;
        goto gen_shift32;

    case INDEX_op_add2_i32:
        if (const_args[4])
            tgen_arithi(s, ARITH_ADD, args[0], args[4]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]);
        if (const_args[5])
            tgen_arithi(s, ARITH_ADC, args[1], args[5]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]);
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4])
            tgen_arithi(s, ARITH_SUB, args[0], args[4]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]);
        if (const_args[5])
            tgen_arithi(s, ARITH_SBB, args[1], args[5]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]);
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        tcg_abort();
    }
}

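/* Operand constraints for every opcode accepted by this backend.  The
   target-specific constraint letters are decoded by target_parse_constraint()
   above; 'i' (immediate) and the digit aliases are handled by common TCG code. */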
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "q", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "0", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
    /* TCG_REG_EBP, */ /* currently used for the global env, so no
                          need to save */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
};

static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, 0x50 + reg);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, 0x58 + reg);
}

/* Generate global QEMU prologue and epilogue code */
void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, frame_size, push_size, stack_addend;

    /* TB prologue */
    /* save all callee saved registers */
    for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }
    /* reserve some stack space */
    push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
    frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    stack_addend = frame_size - push_size;
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);

    tcg_out_modrm(s, 0xff, 4, TCG_REG_EAX); /* jmp *%eax */

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    tcg_out_addi(s, TCG_REG_ESP, stack_addend);
    for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out8(s, 0xc3); /* ret */
}

void tcg_target_init(TCGContext *s)
{
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_EAX) |
                     (1 << TCG_REG_EDX) |
                     (1 << TCG_REG_ECX));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);

    tcg_add_target_add_op_defs(x86_op_defs);
}