]> git.proxmox.com Git - qemu.git/blob - tcg/i386/tcg-target.c
tcg-i386: Tidy xchg.
[qemu.git] / tcg / i386 / tcg-target.c
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
#ifndef NDEBUG
/* Printable names for the eight i386 GPRs, indexed by TCG register
   number; used only by debug dumps, hence compiled out with NDEBUG.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%eax",
    "%ecx",
    "%edx",
    "%ebx",
    "%esp",
    "%ebp",
    "%esi",
    "%edi",
};
#endif
37
/* Register allocation preference order: callee-saved registers first
   (EBX/ESI/EDI/EBP survive helper calls), then the call-clobbered
   ones, with EAX last since many operations implicitly clobber it.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
};
47
/* Registers used for the first integer call arguments under the
   regparm calling conventions, and for 32/64-bit return values.  */
static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };

/* Address of the epilogue; exit_tb jumps here to return to the
   TCG main loop.  */
static uint8_t *tb_ret_addr;
52
53 static void patch_reloc(uint8_t *code_ptr, int type,
54 tcg_target_long value, tcg_target_long addend)
55 {
56 value += addend;
57 switch(type) {
58 case R_386_32:
59 *(uint32_t *)code_ptr = value;
60 break;
61 case R_386_PC32:
62 *(uint32_t *)code_ptr = value - (long)code_ptr;
63 break;
64 case R_386_PC8:
65 value -= (long)code_ptr;
66 if (value != (int8_t)value) {
67 tcg_abort();
68 }
69 *(uint8_t *)code_ptr = value;
70 break;
71 default:
72 tcg_abort();
73 }
74 }
75
76 /* maximum number of register used for input function arguments */
77 static inline int tcg_target_get_call_iarg_regs_count(int flags)
78 {
79 flags &= TCG_CALL_TYPE_MASK;
80 switch(flags) {
81 case TCG_CALL_TYPE_STD:
82 return 0;
83 case TCG_CALL_TYPE_REGPARM_1:
84 case TCG_CALL_TYPE_REGPARM_2:
85 case TCG_CALL_TYPE_REGPARM:
86 return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
87 default:
88 tcg_abort();
89 }
90 }
91
92 /* parse target specific constraints */
93 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
94 {
95 const char *ct_str;
96
97 ct_str = *pct_str;
98 switch(ct_str[0]) {
99 case 'a':
100 ct->ct |= TCG_CT_REG;
101 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
102 break;
103 case 'b':
104 ct->ct |= TCG_CT_REG;
105 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
106 break;
107 case 'c':
108 ct->ct |= TCG_CT_REG;
109 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
110 break;
111 case 'd':
112 ct->ct |= TCG_CT_REG;
113 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
114 break;
115 case 'S':
116 ct->ct |= TCG_CT_REG;
117 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
118 break;
119 case 'D':
120 ct->ct |= TCG_CT_REG;
121 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
122 break;
123 case 'q':
124 ct->ct |= TCG_CT_REG;
125 tcg_regset_set32(ct->u.regs, 0, 0xf);
126 break;
127 case 'r':
128 ct->ct |= TCG_CT_REG;
129 tcg_regset_set32(ct->u.regs, 0, 0xff);
130 break;
131
132 /* qemu_ld/st address constraint */
133 case 'L':
134 ct->ct |= TCG_CT_REG;
135 tcg_regset_set32(ct->u.regs, 0, 0xff);
136 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
137 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
138 break;
139 default:
140 return -1;
141 }
142 ct_str++;
143 *pct_str = ct_str;
144 return 0;
145 }
146
147 /* test if a constant matches the constraint */
148 static inline int tcg_target_const_match(tcg_target_long val,
149 const TCGArgConstraint *arg_ct)
150 {
151 int ct;
152 ct = arg_ct->ct;
153 if (ct & TCG_CT_CONST)
154 return 1;
155 else
156 return 0;
157 }
158
/* Flag merged into an opcode value: the instruction requires the
   two-byte 0x0f escape prefix (emitted by tcg_out_opc).  */
#define P_EXT		0x100		/* 0x0f opcode prefix */

/* Primary opcodes, named after the Intel mnemonic and the operand
   encoding forms (Ev/Gv = modrm r/m and reg fields, Iz/Ib = 32-bit
   and 8-bit immediates).  */
#define OPC_ARITH_EvIz	(0x81)
#define OPC_ARITH_EvIb	(0x83)
#define OPC_ARITH_GvEv	(0x03)		/* ... plus (ARITH_FOO << 3) */
#define OPC_ADD_GvEv	(OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP	(0xc8 | P_EXT)
#define OPC_CALL_Jz	(0xe8)
#define OPC_CMP_GvEv	(OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32	(0x48)
#define OPC_IMUL_GvEv	(0xaf | P_EXT)
#define OPC_IMUL_GvEvIb	(0x6b)
#define OPC_IMUL_GvEvIz	(0x69)
#define OPC_INC_r32	(0x40)
#define OPC_JCC_long	(0x80 | P_EXT)	/* ... plus condition code */
#define OPC_JCC_short	(0x70)		/* ... plus condition code */
#define OPC_JMP_long	(0xe9)
#define OPC_JMP_short	(0xeb)
#define OPC_MOVB_EvGv	(0x88)		/* stores, more or less */
#define OPC_MOVL_EvGv	(0x89)		/* stores, more or less */
#define OPC_MOVL_GvEv	(0x8b)		/* loads, more or less */
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVSBL	(0xbe | P_EXT)
#define OPC_MOVSWL	(0xbf | P_EXT)
#define OPC_MOVZBL	(0xb6 | P_EXT)
#define OPC_MOVZWL	(0xb7 | P_EXT)
#define OPC_POP_r32	(0x58)
#define OPC_PUSH_r32	(0x50)
#define OPC_PUSH_Iv	(0x68)
#define OPC_PUSH_Ib	(0x6a)
#define OPC_RET		(0xc3)
#define OPC_SETCC	(0x90 | P_EXT)	/* ... plus condition code */
#define OPC_SHIFT_1	(0xd1)
#define OPC_SHIFT_Ib	(0xc1)
#define OPC_SHIFT_cl	(0xd3)
#define OPC_TESTL	(0x85)
#define OPC_XCHG_ax_r32	(0x90)

/* Opcode groups: the real operation is selected by the modrm reg
   field, via the EXT3_*/EXT5_* constants below.  */
#define OPC_GRP3_Ev	(0xf7)
#define OPC_GRP5	(0xff)

/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_CALLN_Ev	2
#define EXT5_JMPN_Ev	4
229
/* Condition codes to be added to OPC_JCC_{long,short}.  JCC_JMP is a
   sentinel meaning "unconditional jump" for tcg_out_jxx.  */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

/* Map each TCG comparison to the x86 condition code that is true
   after a CMP of the two operands (signed vs unsigned variants).  */
static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
261
262 static inline void tcg_out_opc(TCGContext *s, int opc)
263 {
264 if (opc & P_EXT)
265 tcg_out8(s, 0x0f);
266 tcg_out8(s, opc);
267 }
268
/* Emit OPC followed by a register-direct ModRM byte:
   mod = 11, reg = R, rm = RM.  */
static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc);
    tcg_out8(s, 0xc0 | (r << 3) | rm);
}
274
275 /* rm == -1 means no register index */
276 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
277 int32_t offset)
278 {
279 tcg_out_opc(s, opc);
280 if (rm == -1) {
281 tcg_out8(s, 0x05 | (r << 3));
282 tcg_out32(s, offset);
283 } else if (offset == 0 && rm != TCG_REG_EBP) {
284 if (rm == TCG_REG_ESP) {
285 tcg_out8(s, 0x04 | (r << 3));
286 tcg_out8(s, 0x24);
287 } else {
288 tcg_out8(s, 0x00 | (r << 3) | rm);
289 }
290 } else if ((int8_t)offset == offset) {
291 if (rm == TCG_REG_ESP) {
292 tcg_out8(s, 0x44 | (r << 3));
293 tcg_out8(s, 0x24);
294 } else {
295 tcg_out8(s, 0x40 | (r << 3) | rm);
296 }
297 tcg_out8(s, offset);
298 } else {
299 if (rm == TCG_REG_ESP) {
300 tcg_out8(s, 0x84 | (r << 3));
301 tcg_out8(s, 0x24);
302 } else {
303 tcg_out8(s, 0x80 | (r << 3) | rm);
304 }
305 tcg_out32(s, offset);
306 }
307 }
308
/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi. */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* The group-1 sub-operation selects the opcode byte directly.  */
    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3), dest, src);
}
314
315 static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
316 {
317 if (arg != ret) {
318 tcg_out_modrm(s, OPC_MOVL_GvEv, ret, arg);
319 }
320 }
321
322 static inline void tcg_out_movi(TCGContext *s, TCGType type,
323 int ret, int32_t arg)
324 {
325 if (arg == 0) {
326 tgen_arithr(s, ARITH_XOR, ret, ret);
327 } else {
328 tcg_out8(s, OPC_MOVL_Iv + ret);
329 tcg_out32(s, arg);
330 }
331 }
332
333 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
334 {
335 if (val == (int8_t)val) {
336 tcg_out_opc(s, OPC_PUSH_Ib);
337 tcg_out8(s, val);
338 } else {
339 tcg_out_opc(s, OPC_PUSH_Iv);
340 tcg_out32(s, val);
341 }
342 }
343
/* Push register REG (single-byte push r32 encoding).  */
static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + reg);
}
348
/* Pop into register REG (single-byte pop r32 encoding).  */
static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + reg);
}
353
/* Load 32 bits from ARG2(%arg1) into register RET.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
}
359
/* Store register ARG (32 bits) to ARG2(%arg1).  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
}
365
366 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
367 {
368 if (count == 1) {
369 tcg_out_modrm(s, OPC_SHIFT_1, subopc, reg);
370 } else {
371 tcg_out_modrm(s, OPC_SHIFT_Ib, subopc, reg);
372 tcg_out8(s, count);
373 }
374 }
375
/* Byte-swap the 32-bit register REG (bswap r32).  */
static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + reg);
}
380
/* Swap the two bytes of the low 16 bits of REG: rolw $8, reg
   (0x66 is the operand-size prefix).  */
static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out8(s, 0x66);
    tcg_out_shifti(s, SHIFT_ROL, reg, 8);
}
386
/* Zero-extend the low byte of SRC into DEST.  SRC must be one of
   EAX..EBX, the only registers with an addressable low byte.  */
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl */
    assert(src < 4);
    tcg_out_modrm(s, OPC_MOVZBL, dest, src);
}
393
/* Sign-extend the low byte of SRC into DEST.  SRC must be one of
   EAX..EBX, the only registers with an addressable low byte.  */
static void tcg_out_ext8s(TCGContext *s, int dest, int src)
{
    /* movsbl */
    assert(src < 4);
    tcg_out_modrm(s, OPC_MOVSBL, dest, src);
}
400
/* Zero-extend the low 16 bits of SRC into DEST.  */
static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}
406
/* Sign-extend the low 16 bits of SRC into DEST.  */
static inline void tcg_out_ext16s(TCGContext *s, int dest, int src)
{
    /* movswl */
    tcg_out_modrm(s, OPC_MOVSWL, dest, src);
}
412
/* Generate r0 C= VAL for a group-1 sub-operation C (ARITH_*).
   CF non-zero means the carry flag must stay meaningful afterwards,
   which rules out INC/DEC (they leave CF unchanged).  The shortest
   encoding that preserves semantics is picked.  */
static inline void tgen_arithi(TCGContext *s, int c, int r0,
                               int32_t val, int cf)
{
    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        /* add/sub by +-1 becomes inc or dec (direction flips for -1).  */
        int opc = ((c == ARITH_ADD) ^ (val < 0) ? OPC_INC_r32 : OPC_DEC_r32);
        tcg_out_opc(s, opc + r0);
    } else if (val == (int8_t)val) {
        /* Sign-extended 8-bit immediate form.  */
        tcg_out_modrm(s, OPC_ARITH_EvIb, c, r0);
        tcg_out8(s, val);
    } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
        /* andl $0xff becomes movzbl (only for byte-addressable regs).  */
        tcg_out_ext8u(s, r0, r0);
    } else if (c == ARITH_AND && val == 0xffffu) {
        /* andl $0xffff becomes movzwl.  */
        tcg_out_ext16u(s, r0, r0);
    } else {
        /* General 32-bit immediate form.  */
        tcg_out_modrm(s, OPC_ARITH_EvIz, c, r0);
        tcg_out32(s, val);
    }
}
434
435 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
436 {
437 if (val != 0)
438 tgen_arithi(s, ARITH_ADD, reg, val, 0);
439 }
440
/* Use SMALL != 0 to force a short forward branch.  Emits a jump to
   LABEL_INDEX, conditional on OPC (a JCC_* code, or JCC_JMP == -1
   for unconditional).  Backward branches pick the shortest encoding
   that fits; forward branches are relocated when the label is set.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        /* Label already resolved: val is relative to the start of the
           branch; the CPU wants it relative to the end, so subtract
           the instruction length (2 for short, 5/6 for long forms).  */
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                /* Caller promised a short branch but it doesn't fit.  */
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        /* Forward short branch: leave a byte to be patched later.  */
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        /* Forward long branch: leave 4 bytes to be patched later;
           addend -4 accounts for the PC being past the displacement.  */
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
487
488 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
489 int const_arg2)
490 {
491 if (const_arg2) {
492 if (arg2 == 0) {
493 /* test r, r */
494 tcg_out_modrm(s, OPC_TESTL, arg1, arg1);
495 } else {
496 tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
497 }
498 } else {
499 tgen_arithr(s, ARITH_CMP, arg1, arg2);
500 }
501 }
502
/* Emit a compare of ARG1 vs ARG2 followed by a conditional branch
   on COND to LABEL_INDEX (short form forced when SMALL).  */
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
510
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
/* 64-bit compare-and-branch built from 32-bit halves.
   args[0],args[1] = low/high of operand A; args[2],args[3] = low/high
   of operand B; args[4] = condition; args[5] = target label.
   For ordered conditions: branch on the strict high-word comparison,
   fall through to the low-word (unsigned) comparison only when the
   high words are equal (the JNE skips it otherwise).  */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        /* Equal iff both halves are equal: bail out early on the
           low half, then test the high half.  */
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       label_next, 1);
        tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_NE:
        /* Not-equal iff either half differs.  */
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
                       args[5], small);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3],
                       args[5], small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                       args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                       args[5], small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}
592
/* Set DEST to 0/1 according to COND applied to ARG1 and ARG2.
   SETcc writes only the low byte, so zero-extend afterwards; DEST
   must therefore be byte-addressable (presumably enforced by the
   'q' operand constraint — confirm against the op table).  */
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg dest,
                            TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
600
/* 64-bit setcond built on tcg_out_brcond2.
   args[0] = dest; args[1..4] = the two 64-bit operands as 32-bit
   halves; args[5] = condition.  The brcond2 argument vector is
   args+1 with a label appended in slot 5.  */
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, (tcg_target_long)s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        /* Invert the condition so the branch skips the increment.  */
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
    }
}
642
/* Emit a direct call to DEST.  The rel32 displacement is taken from
   the end of the 5-byte call instruction (code_ptr points just past
   the opcode when the 4 displacement bytes are written).  */
static void tcg_out_calli(TCGContext *s, tcg_target_long dest)
{
    tcg_out_opc(s, OPC_CALL_Jz);
    tcg_out32(s, dest - (tcg_target_long)s->code_ptr - 4);
}
648
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* Slow-path softmmu load helpers, indexed by log2 of the access
   size (0=byte .. 3=quad).  */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* Slow-path softmmu store helpers, same indexing.  */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

/* GUEST_BASE offsets guest addresses in user-only mode; for system
   emulation it is always zero.  */
#ifndef CONFIG_USER_ONLY
#define GUEST_BASE 0
#endif
671
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed registers globals are less
   common. */
/* Emit a guest memory load.  args = { data_reg [, data_reg2 for
   64-bit], addr_reg [, addr_reg2 for 64-bit guest addresses],
   mem_index }.  opc encodes the access size in its low 2 bits and
   sign-extension in bit 2.  With CONFIG_SOFTMMU this emits an inline
   TLB lookup with a call to qemu_ld_helpers[] on miss; the fast path
   clobbers EAX (r0) and EDX (r1).  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Fixed scratch registers for the TLB lookup.  */
    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    /* r1 = TLB index of the page.  */
    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* r0 = page-aligned address (keeping the low bits that must be
       zero for an aligned access of this size).  */
    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    /* r1 = &env->tlb_table[mem_index][index].addr_read; EBP is
       assumed to hold the CPU env pointer (AREG0) — it is used as
       the SIB index here.  */
    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    /* r0 = full guest address again for the addend add / helper call.  */
    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* 64-bit guest address: also compare the high word.  */
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* TLB miss: call the load helper (address already in EAX).  */
    /* XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
#else
    tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
#endif
    tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);

    /* Move/extend the helper result (EAX, or EDX:EAX for 64-bit)
       into the destination register(s).  */
    switch(opc) {
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX);
        break;
    case 0:
        tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
        break;
    case 1:
        tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
        break;
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_EAX);
        break;
    case 3:
        if (data_reg == TCG_REG_EDX) {
            /* data_reg would be clobbered by the first move:
               xchg %edx, %eax swaps the pair instead.  */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX);
            tcg_out_mov(s, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, data_reg2, TCG_REG_EDX);
        }
        break;
    }

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: TLB hit — patch the short branch displacement.  */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0: turn the guest address into a host address
       using the TLB entry's addend.  */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* The actual load from host memory, with byte swapping for
       big-endian guests.  */
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, OPC_MOVZBL, data_reg, r0, GUEST_BASE);
        break;
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm_offset(s, OPC_MOVSBL, data_reg, r0, GUEST_BASE);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, OPC_MOVZWL, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_rolw_8(s, data_reg);
        }
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm_offset(s, OPC_MOVSWL, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_rolw_8(s, data_reg);

            /* movswl data_reg, data_reg: re-extend after the swap.  */
            tcg_out_modrm(s, OPC_MOVSWL, data_reg, data_reg);
        }
        break;
    case 2:
        tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        if (bswap) {
            tcg_out_bswap32(s, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            /* Swap the halves so each 32-bit bswap lands in the
               right destination.  */
            int t = data_reg;
            data_reg = data_reg2;
            data_reg2 = t;
        }
        if (r0 != data_reg) {
            tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
            tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
        } else {
            /* Load the half that overwrites the address register last.  */
            tcg_out_ld(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
            tcg_out_ld(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        }
        if (bswap) {
            tcg_out_bswap32(s, data_reg);
            tcg_out_bswap32(s, data_reg2);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: end of the fast path — patch the skip-over jump.  */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
864
865
/* Emit a guest memory store.  args = { data_reg [, data_reg2 for
   64-bit], addr_reg [, addr_reg2], mem_index }.  opc is log2 of the
   access size.  Mirrors tcg_out_qemu_ld: inline TLB lookup against
   addr_write, calling qemu_st_helpers[] on miss; helper arguments
   that don't fit in registers are pushed on the stack and popped or
   adjusted away afterwards (stack_adjust).  */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    int stack_adjust;
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;

    s_bits = opc;

    /* Fixed scratch registers for the TLB lookup.  */
    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    /* r1 = TLB index of the page.  */
    tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* r0 = page-aligned address (alignment bits kept so a misaligned
       access fails the compare).  */
    tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    /* r1 = &env->tlb_table[mem_index][index].addr_write; EBP is
       assumed to hold the CPU env pointer (AREG0).  */
    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);

    /* r0 = full guest address again.  */
    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;
#else
    /* 64-bit guest address: also compare the high word.  */
    /* jne label3 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* TLB miss: marshal helper arguments (address already in EAX;
       remaining args in EDX/ECX per regparm, overflow on the stack).  */
    /* XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, data_reg);
        tcg_out_mov(s, TCG_REG_ECX, data_reg2);
        tcg_out_pushi(s, mem_index);
        stack_adjust = 4;
    } else {
        switch(opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_EDX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_EDX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_EDX, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
        stack_adjust = 0;
    }
#else
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        /* Arguments are pushed right-to-left.  */
        tcg_out_pushi(s, mem_index);
        tcg_out_push(s, data_reg2);
        tcg_out_push(s, data_reg);
        stack_adjust = 12;
    } else {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        switch(opc) {
        case 0:
            tcg_out_ext8u(s, TCG_REG_ECX, data_reg);
            break;
        case 1:
            tcg_out_ext16u(s, TCG_REG_ECX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_ECX, data_reg);
            break;
        }
        tcg_out_pushi(s, mem_index);
        stack_adjust = 4;
    }
#endif

    tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);

    if (stack_adjust == 4) {
        /* Pop and discard.  This is 2 bytes smaller than the add.  */
        tcg_out_pop(s, TCG_REG_ECX);
    } else if (stack_adjust != 0) {
        tcg_out_addi(s, TCG_REG_ESP, stack_adjust);
    }

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: TLB hit — patch the short branch displacement.  */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0: turn the guest address into a host address
       using the TLB entry's addend.  */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
                         offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* The actual store to host memory, byte-swapping through the
       scratch register r1 for big-endian guests.  */
    switch(opc) {
    case 0:
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv, data_reg, r0, GUEST_BASE);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out_rolw_8(s, r1);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv, data_reg, r0, GUEST_BASE);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out_bswap32(s, r1);
            data_reg = r1;
        }
        tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
        break;
    case 3:
        if (bswap) {
            /* Big-endian: high half (data_reg2) goes to the low
               address, each half byte-swapped.  */
            tcg_out_mov(s, r1, data_reg2);
            tcg_out_bswap32(s, r1);
            tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE);
            tcg_out_mov(s, r1, data_reg);
            tcg_out_bswap32(s, r1);
            tcg_out_st(s, TCG_TYPE_I32, r1, r0, GUEST_BASE + 4);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, data_reg, r0, GUEST_BASE);
            tcg_out_st(s, TCG_TYPE_I32, data_reg2, r0, GUEST_BASE + 4);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: end of the fast path — patch the skip-over jump.  */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
1060
1061 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1062 const TCGArg *args, const int *const_args)
1063 {
1064 int c;
1065
1066 switch(opc) {
1067 case INDEX_op_exit_tb:
1068 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
1069 tcg_out8(s, OPC_JMP_long); /* jmp tb_ret_addr */
1070 tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
1071 break;
1072 case INDEX_op_goto_tb:
1073 if (s->tb_jmp_offset) {
1074 /* direct jump method */
1075 tcg_out8(s, OPC_JMP_long); /* jmp im */
1076 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1077 tcg_out32(s, 0);
1078 } else {
1079 /* indirect jump method */
1080 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
1081 (tcg_target_long)(s->tb_next + args[0]));
1082 }
1083 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1084 break;
1085 case INDEX_op_call:
1086 if (const_args[0]) {
1087 tcg_out_calli(s, args[0]);
1088 } else {
1089 /* call *reg */
1090 tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
1091 }
1092 break;
1093 case INDEX_op_jmp:
1094 if (const_args[0]) {
1095 tcg_out8(s, OPC_JMP_long);
1096 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
1097 } else {
1098 /* jmp *reg */
1099 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, args[0]);
1100 }
1101 break;
1102 case INDEX_op_br:
1103 tcg_out_jxx(s, JCC_JMP, args[0], 0);
1104 break;
1105 case INDEX_op_movi_i32:
1106 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1107 break;
1108 case INDEX_op_ld8u_i32:
1109 /* movzbl */
1110 tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
1111 break;
1112 case INDEX_op_ld8s_i32:
1113 /* movsbl */
1114 tcg_out_modrm_offset(s, OPC_MOVSBL, args[0], args[1], args[2]);
1115 break;
1116 case INDEX_op_ld16u_i32:
1117 /* movzwl */
1118 tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
1119 break;
1120 case INDEX_op_ld16s_i32:
1121 /* movswl */
1122 tcg_out_modrm_offset(s, OPC_MOVSWL, args[0], args[1], args[2]);
1123 break;
1124 case INDEX_op_ld_i32:
1125 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1126 break;
1127 case INDEX_op_st8_i32:
1128 /* movb */
1129 tcg_out_modrm_offset(s, OPC_MOVB_EvGv, args[0], args[1], args[2]);
1130 break;
1131 case INDEX_op_st16_i32:
1132 /* movw */
1133 tcg_out8(s, 0x66);
1134 tcg_out_modrm_offset(s, OPC_MOVL_EvGv, args[0], args[1], args[2]);
1135 break;
1136 case INDEX_op_st_i32:
1137 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1138 break;
1139 case INDEX_op_sub_i32:
1140 c = ARITH_SUB;
1141 goto gen_arith;
1142 case INDEX_op_and_i32:
1143 c = ARITH_AND;
1144 goto gen_arith;
1145 case INDEX_op_or_i32:
1146 c = ARITH_OR;
1147 goto gen_arith;
1148 case INDEX_op_xor_i32:
1149 c = ARITH_XOR;
1150 goto gen_arith;
1151 case INDEX_op_add_i32:
1152 c = ARITH_ADD;
1153 gen_arith:
1154 if (const_args[2]) {
1155 tgen_arithi(s, c, args[0], args[2], 0);
1156 } else {
1157 tgen_arithr(s, c, args[0], args[2]);
1158 }
1159 break;
1160 case INDEX_op_mul_i32:
1161 if (const_args[2]) {
1162 int32_t val;
1163 val = args[2];
1164 if (val == (int8_t)val) {
1165 tcg_out_modrm(s, OPC_IMUL_GvEvIb, args[0], args[0]);
1166 tcg_out8(s, val);
1167 } else {
1168 tcg_out_modrm(s, OPC_IMUL_GvEvIz, args[0], args[0]);
1169 tcg_out32(s, val);
1170 }
1171 } else {
1172 tcg_out_modrm(s, OPC_IMUL_GvEv, args[0], args[2]);
1173 }
1174 break;
1175 case INDEX_op_mulu2_i32:
1176 tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
1177 break;
1178 case INDEX_op_div2_i32:
1179 tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_IDIV, args[4]);
1180 break;
1181 case INDEX_op_divu2_i32:
1182 tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_DIV, args[4]);
1183 break;
1184 case INDEX_op_shl_i32:
1185 c = SHIFT_SHL;
1186 gen_shift32:
1187 if (const_args[2]) {
1188 tcg_out_shifti(s, c, args[0], args[2]);
1189 } else {
1190 tcg_out_modrm(s, OPC_SHIFT_cl, c, args[0]);
1191 }
1192 break;
1193 case INDEX_op_shr_i32:
1194 c = SHIFT_SHR;
1195 goto gen_shift32;
1196 case INDEX_op_sar_i32:
1197 c = SHIFT_SAR;
1198 goto gen_shift32;
1199 case INDEX_op_rotl_i32:
1200 c = SHIFT_ROL;
1201 goto gen_shift32;
1202 case INDEX_op_rotr_i32:
1203 c = SHIFT_ROR;
1204 goto gen_shift32;
1205
1206 case INDEX_op_add2_i32:
1207 if (const_args[4]) {
1208 tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
1209 } else {
1210 tgen_arithr(s, ARITH_ADD, args[0], args[4]);
1211 }
1212 if (const_args[5]) {
1213 tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
1214 } else {
1215 tgen_arithr(s, ARITH_ADC, args[1], args[5]);
1216 }
1217 break;
1218 case INDEX_op_sub2_i32:
1219 if (const_args[4]) {
1220 tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
1221 } else {
1222 tgen_arithr(s, ARITH_SUB, args[0], args[4]);
1223 }
1224 if (const_args[5]) {
1225 tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
1226 } else {
1227 tgen_arithr(s, ARITH_SBB, args[1], args[5]);
1228 }
1229 break;
1230 case INDEX_op_brcond_i32:
1231 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
1232 args[3], 0);
1233 break;
1234 case INDEX_op_brcond2_i32:
1235 tcg_out_brcond2(s, args, const_args, 0);
1236 break;
1237
1238 case INDEX_op_bswap16_i32:
1239 tcg_out_rolw_8(s, args[0]);
1240 break;
1241 case INDEX_op_bswap32_i32:
1242 tcg_out_bswap32(s, args[0]);
1243 break;
1244
1245 case INDEX_op_neg_i32:
1246 tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, args[0]);
1247 break;
1248
1249 case INDEX_op_not_i32:
1250 tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NOT, args[0]);
1251 break;
1252
1253 case INDEX_op_ext8s_i32:
1254 tcg_out_ext8s(s, args[0], args[1]);
1255 break;
1256 case INDEX_op_ext16s_i32:
1257 tcg_out_ext16s(s, args[0], args[1]);
1258 break;
1259 case INDEX_op_ext8u_i32:
1260 tcg_out_ext8u(s, args[0], args[1]);
1261 break;
1262 case INDEX_op_ext16u_i32:
1263 tcg_out_ext16u(s, args[0], args[1]);
1264 break;
1265
1266 case INDEX_op_setcond_i32:
1267 tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
1268 break;
1269 case INDEX_op_setcond2_i32:
1270 tcg_out_setcond2(s, args, const_args);
1271 break;
1272
1273 case INDEX_op_qemu_ld8u:
1274 tcg_out_qemu_ld(s, args, 0);
1275 break;
1276 case INDEX_op_qemu_ld8s:
1277 tcg_out_qemu_ld(s, args, 0 | 4);
1278 break;
1279 case INDEX_op_qemu_ld16u:
1280 tcg_out_qemu_ld(s, args, 1);
1281 break;
1282 case INDEX_op_qemu_ld16s:
1283 tcg_out_qemu_ld(s, args, 1 | 4);
1284 break;
1285 case INDEX_op_qemu_ld32:
1286 tcg_out_qemu_ld(s, args, 2);
1287 break;
1288 case INDEX_op_qemu_ld64:
1289 tcg_out_qemu_ld(s, args, 3);
1290 break;
1291
1292 case INDEX_op_qemu_st8:
1293 tcg_out_qemu_st(s, args, 0);
1294 break;
1295 case INDEX_op_qemu_st16:
1296 tcg_out_qemu_st(s, args, 1);
1297 break;
1298 case INDEX_op_qemu_st32:
1299 tcg_out_qemu_st(s, args, 2);
1300 break;
1301 case INDEX_op_qemu_st64:
1302 tcg_out_qemu_st(s, args, 3);
1303 break;
1304
1305 default:
1306 tcg_abort();
1307 }
1308 }
1309
/* Operand constraints for every TCG opcode this backend accepts.
 * Each entry pairs an opcode with one constraint string per operand,
 * outputs first, then inputs.  The letters are presumably decoded by
 * this backend's constraint parser (outside this chunk -- TODO confirm):
 * "r" any 32-bit register, "i" immediate allowed, "0"/"1" alias the
 * like-numbered output operand, and single letters such as "a"/"d"/"c"
 * appear to pin an operand to a specific register (e.g. mul/div results
 * in EAX:EDX, shift counts in CL).  "q" and "L" look like restricted
 * register classes (byte-addressable regs / qemu_ld-st scratch regs)
 * -- verify against the parser before relying on that reading. */
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    /* st8 needs a byte-addressable source register ("q"). */
    { INDEX_op_st8_i32, { "q", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* Two-address x86 arithmetic: the destination aliases input 0. */
    { INDEX_op_add_i32, { "r", "0", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    /* Variable shift counts presumably live in CL ("c") -- confirm. */
    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },

    /* In-place byte swaps: destination aliases the source. */
    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    /* 8-bit extensions need a byte-addressable source register. */
    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },

    /* A 32-bit guest address is one operand; a 64-bit guest address
       (TARGET_LONG_BITS != 32) takes a register pair, hence the extra
       "L" operand in the #else branch. */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    /* Sentinel terminating the table. */
    { -1 },
};
1391
1392 static int tcg_target_callee_save_regs[] = {
1393 /* TCG_REG_EBP, */ /* currently used for the global env, so no
1394 need to save */
1395 TCG_REG_EBX,
1396 TCG_REG_ESI,
1397 TCG_REG_EDI,
1398 };
1399
1400 /* Generate global QEMU prologue and epilogue code */
1401 void tcg_target_qemu_prologue(TCGContext *s)
1402 {
1403 int i, frame_size, push_size, stack_addend;
1404
1405 /* TB prologue */
1406 /* save all callee saved registers */
1407 for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1408 tcg_out_push(s, tcg_target_callee_save_regs[i]);
1409 }
1410 /* reserve some stack space */
1411 push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
1412 frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
1413 frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
1414 ~(TCG_TARGET_STACK_ALIGN - 1);
1415 stack_addend = frame_size - push_size;
1416 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
1417
1418 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_EAX); /* jmp *%eax */
1419
1420 /* TB epilogue */
1421 tb_ret_addr = s->code_ptr;
1422 tcg_out_addi(s, TCG_REG_ESP, stack_addend);
1423 for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
1424 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
1425 }
1426 tcg_out_opc(s, OPC_RET);
1427 }
1428
1429 void tcg_target_init(TCGContext *s)
1430 {
1431 #if !defined(CONFIG_USER_ONLY)
1432 /* fail safe */
1433 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
1434 tcg_abort();
1435 #endif
1436
1437 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
1438
1439 tcg_regset_clear(tcg_target_call_clobber_regs);
1440 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
1441 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
1442 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
1443
1444 tcg_regset_clear(s->reserved_regs);
1445 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);
1446
1447 tcg_add_target_add_op_defs(x86_op_defs);
1448 }