/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
    "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
#else
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
#endif
};
#endif

static const int tcg_target_reg_alloc_order[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_RCX,
    TCG_REG_RDX,
    TCG_REG_RSI,
    TCG_REG_RDI,
    TCG_REG_RAX,
#else
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
#endif
};

static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
#if defined(_WIN64)
    TCG_REG_RCX,
    TCG_REG_RDX,
#else
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
#endif
    TCG_REG_R8,
    TCG_REG_R9,
#else
    /* 32-bit mode uses a stack-based calling convention (the GCC default). */
#endif
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_EAX,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_EDX
#endif
};

/* Registers used with the L constraint, which are the first two argument
   registers on x86_64, and two call-clobbered registers on i386. */
#if TCG_TARGET_REG_BITS == 64
# define TCG_REG_L0 tcg_target_call_iarg_regs[0]
# define TCG_REG_L1 tcg_target_call_iarg_regs[1]
#else
# define TCG_REG_L0 TCG_REG_EAX
# define TCG_REG_L1 TCG_REG_EDX
#endif

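/* A brief orientation note (added): tb_ret_addr records the address within
   the generated prologue to which exit_tb jumps back; it is assigned when
   the prologue is emitted, which falls outside this excerpt. */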
static uint8_t *tb_ret_addr;

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch(type) {
    case R_386_PC32:
        value -= (uintptr_t)code_ptr;
        if (value != (int32_t)value) {
            tcg_abort();
        }
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC8:
        value -= (uintptr_t)code_ptr;
        if (value != (int8_t)value) {
            tcg_abort();
        }
        *(uint8_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xf);
        }
        break;
    case 'Q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
        }
        break;

    /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
#if TCG_TARGET_REG_BITS == 64
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
#else
        tcg_regset_set32(ct->u.regs, 0, 0xff);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
        break;

    case 'e':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_U32;
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    }
    return 0;
}

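/* Added note: in 64-bit mode only the low three bits of a register number
   fit in the ModRM and SIB fields; bit 3 travels in the REX prefix, which
   tcg_out_opc computes from the full register numbers below. */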
#if TCG_TARGET_REG_BITS == 64
# define LOWREGMASK(x)  ((x) & 7)
#else
# define LOWREGMASK(x)  (x)
#endif

#define P_EXT        0x100      /* 0x0f opcode prefix */
#define P_DATA16     0x200      /* 0x66 opcode prefix */
#if TCG_TARGET_REG_BITS == 64
# define P_ADDR32    0x400      /* 0x67 opcode prefix */
# define P_REXW      0x800      /* Set REX.W = 1 */
# define P_REXB_R    0x1000     /* REG field as byte register */
# define P_REXB_RM   0x2000     /* R/M field as byte register */
# define P_GS        0x4000     /* gs segment override */
#else
# define P_ADDR32    0
# define P_REXW      0
# define P_REXB_R    0
# define P_REXB_RM   0
# define P_GS        0
#endif

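/* Added note: the opcode suffixes below follow Intel's opcode-map notation:
   Gv is a general register named by the ModRM REG field, Ev a register or
   memory operand named by the ModRM R/M field, Iz a word/doubleword
   immediate, Ib a byte immediate, Iv an operand-sized immediate, and Jz a
   doubleword relative branch displacement. */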
#define OPC_ARITH_EvIz  (0x81)
#define OPC_ARITH_EvIb  (0x83)
#define OPC_ARITH_GvEv  (0x03)          /* ... plus (ARITH_FOO << 3) */
#define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP       (0xc8 | P_EXT)
#define OPC_CALL_Jz     (0xe8)
#define OPC_CMOVCC      (0x40 | P_EXT)  /* ... plus condition code */
#define OPC_CMP_GvEv    (OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32     (0x48)
#define OPC_IMUL_GvEv   (0xaf | P_EXT)
#define OPC_IMUL_GvEvIb (0x6b)
#define OPC_IMUL_GvEvIz (0x69)
#define OPC_INC_r32     (0x40)
#define OPC_JCC_long    (0x80 | P_EXT)  /* ... plus condition code */
#define OPC_JCC_short   (0x70)          /* ... plus condition code */
#define OPC_JMP_long    (0xe9)
#define OPC_JMP_short   (0xeb)
#define OPC_LEA         (0x8d)
#define OPC_MOVB_EvGv   (0x88)          /* stores, more or less */
#define OPC_MOVL_EvGv   (0x89)          /* stores, more or less */
#define OPC_MOVL_GvEv   (0x8b)          /* loads, more or less */
#define OPC_MOVB_EvIz   (0xc6)
#define OPC_MOVL_EvIz   (0xc7)
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVSBL      (0xbe | P_EXT)
#define OPC_MOVSWL      (0xbf | P_EXT)
#define OPC_MOVSLQ      (0x63 | P_REXW)
#define OPC_MOVZBL      (0xb6 | P_EXT)
#define OPC_MOVZWL      (0xb7 | P_EXT)
#define OPC_POP_r32     (0x58)
#define OPC_PUSH_r32    (0x50)
#define OPC_PUSH_Iv     (0x68)
#define OPC_PUSH_Ib     (0x6a)
#define OPC_RET         (0xc3)
#define OPC_SETCC       (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
#define OPC_SHIFT_1     (0xd1)
#define OPC_SHIFT_Ib    (0xc1)
#define OPC_SHIFT_cl    (0xd3)
#define OPC_TESTL       (0x85)
#define OPC_XCHG_ax_r32 (0x90)

#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)

/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH. */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3. */
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5. */
#define EXT5_INC_Ev   0
#define EXT5_DEC_Ev   1
#define EXT5_CALLN_Ev 2
#define EXT5_JMPN_Ev  4

/* Condition codes to be added to OPC_JCC_{long,short}. */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;

    if (opc & P_GS) {
        tcg_out8(s, 0x65);
    }
    if (opc & P_DATA16) {
        /* We should never be asking for both 16 and 64-bit operation.  */
        assert((opc & P_REXW) == 0);
        tcg_out8(s, 0x66);
    }
    if (opc & P_ADDR32) {
        tcg_out8(s, 0x67);
    }

    rex = 0;
    rex |= (opc & P_REXW) >> 8;         /* REX.W */
    rex |= (r & 8) >> 1;                /* REX.R */
    rex |= (x & 8) >> 2;                /* REX.X */
    rex |= (rm & 8) >> 3;               /* REX.B */

    /* P_REXB_{R,RM} indicates that the given register is the low byte.
       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
       as otherwise the encoding indicates %[abcd]h.  Note that the values
       that are ORed in merely indicate that the REX byte must be present;
       those bits get discarded in output.  */
    rex |= opc & (r >= 4 ? P_REXB_R : 0);
    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);

    if (rex) {
        tcg_out8(s, (uint8_t)(rex | 0x40));
    }

    if (opc & P_EXT) {
        tcg_out8(s, 0x0f);
    }
    tcg_out8(s, opc);
}
#else
static void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_DATA16) {
        tcg_out8(s, 0x66);
    }
    if (opc & P_EXT) {
        tcg_out8(s, 0x0f);
    }
    tcg_out8(s, opc);
}
/* Discard the register arguments to tcg_out_opc early, so as not to penalize
   the 32-bit compilation paths.  This works with all versions of gcc,
   whereas relying on the optimizer to delete the unused arguments may not. */
#define tcg_out_opc(s, opc, r, rm, x)  (tcg_out_opc)(s, opc)
#endif

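/* Added note: this emits the register-direct ModRM form: mod = 11 in the
   top two bits, the REG field holds r and the R/M field holds rm (low
   three bits each; the high bit of either register rides in the REX
   prefix on x86_64). */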
static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}

/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   Either RM or INDEX may be omitted by passing a negative value.  In 64-bit
   mode for absolute addresses, ~RM is the size of the immediate operand
   that will follow the instruction.  */

static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift,
                                     tcg_target_long offset)
{
    int mod, len;

    if (index < 0 && rm < 0) {
        if (TCG_TARGET_REG_BITS == 64) {
            /* Try for a rip-relative addressing mode.  This has replaced
               the 32-bit-mode absolute addressing encoding.  */
            tcg_target_long pc = (tcg_target_long)s->code_ptr + 5 + ~rm;
            tcg_target_long disp = offset - pc;
            if (disp == (int32_t)disp) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
                tcg_out32(s, disp);
                return;
            }

            /* Try for an absolute address encoding.  This requires the
               use of the MODRM+SIB encoding and is therefore larger than
               rip-relative addressing.  */
            if (offset == (int32_t)offset) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
                tcg_out8(s, (4 << 3) | 5);
                tcg_out32(s, offset);
                return;
            }

            /* ??? The memory isn't directly addressable.  */
            tcg_abort();
        } else {
            /* Absolute address.  */
            tcg_out_opc(s, opc, r, 0, 0);
            tcg_out8(s, (r << 3) | 5);
            tcg_out32(s, offset);
            return;
        }
    }

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm < 0) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out_opc(s, opc, r, rm, 0);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  In 64-bit mode, the REX.X
           bit counts, so %r12 can be used as the index.  */
        if (index < 0) {
            index = 4;
        } else {
            assert(index != TCG_REG_ESP);
        }

        tcg_out_opc(s, opc, r, rm, index);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
        tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
    }

    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}

/* A simplification of the above with no index or shift.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
                                        int rm, tcg_target_long offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}

/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* Propagate an opcode prefix, such as P_REXW.  */
    int ext = subop & ~0x7;
    subop &= 0x7;

    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (arg != ret) {
        int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
        tcg_out_modrm(s, opc, ret, arg);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    if (arg == 0) {
        tgen_arithr(s, ARITH_XOR, ret, ret);
        return;
    } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
    } else if (arg == (int32_t)arg) {
        tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
        tcg_out32(s, arg);
    } else {
        tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
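        /* Added note: emit the high half of the 64-bit immediate.  Shifting
           by 31 and then by 1 keeps the expression well defined even when
           tcg_target_long is only 32 bits wide and this branch is dead.  */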
        tcg_out32(s, arg >> 31 >> 1);
    }
}

static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
        tcg_out8(s, val);
    } else if (val == (int32_t)val) {
        tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
        tcg_out32(s, val);
    } else {
        tcg_abort();
    }
}

static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
}

static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    /* Propagate an opcode prefix, such as P_DATA16.  */
    int ext = subopc & ~0x7;
    subopc &= 0x7;

    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
        tcg_out8(s, count);
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
}

static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
    /* movzbl */
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}

static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsbl */
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}

static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}

static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
{
    /* movsw[lq] */
    tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
}

static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
{
    /* 32-bit mov zero extends.  */
    tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
}

static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
}

static inline void tcg_out_bswap64(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
}

static void tgen_arithi(TCGContext *s, int c, int r0,
                        tcg_target_long val, int cf)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        rexw = c & -8;
        c &= 7;
    }

    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int is_inc = (c == ARITH_ADD) ^ (val < 0);
        if (TCG_TARGET_REG_BITS == 64) {
            /* The single-byte increment encodings are re-tasked as the
               REX prefixes.  Use the MODRM encoding.  */
            tcg_out_modrm(s, OPC_GRP5 + rexw,
                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
        } else {
            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
        }
        return;
    }

    if (c == ARITH_AND) {
        if (TCG_TARGET_REG_BITS == 64) {
            if (val == 0xffffffffu) {
                tcg_out_ext32u(s, r0, r0);
                return;
            }
            if (val == (uint32_t)val) {
                /* AND with no high bits set can use a 32-bit operation.  */
                rexw = 0;
            }
        }
        if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
            tcg_out_ext8u(s, r0, r0);
            return;
        }
        if (val == 0xffffu) {
            tcg_out_ext16u(s, r0, r0);
            return;
        }
    }

    if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
        tcg_out8(s, val);
        return;
    }
    if (rexw == 0 || val == (int32_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
        tcg_out32(s, val);
        return;
    }

    tcg_abort();
}

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
    }
}

/* Use SMALL != 0 to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
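        /* Added note: branch displacements are relative to the end of the
           branch instruction: 2 bytes for the short forms, 5 for jmp rel32
           and 6 for the two-byte jcc rel32, hence the adjustments below.  */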
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}

static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int rexw)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
    }
}

static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
#else
/* XXX: we implement it at the target level to avoid having to
   handle temporaries that live across basic blocks.  */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_next, 1);
        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                         args[5], small);
        break;
    case TCG_COND_NE:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         args[5], small);
        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
                         args[5], small);
        break;
    case TCG_COND_LT:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LE:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GT:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GE:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, s->code_ptr);
}
#endif

static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
#else
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, s->code_ptr);
    }
}
#endif

static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, 0);
    tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
    tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);
}
#endif

static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
{
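    /* Added note: call rel32 and jmp rel32 are both 5 bytes long, and the
       displacement is measured from the end of the instruction.  */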
    tcg_target_long disp = dest - (tcg_target_long)s->code_ptr - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out32(s, disp);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest);
        tcg_out_modrm(s, OPC_GRP5,
                      call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
    }
}

static inline void tcg_out_calli(TCGContext *s, tcg_target_long dest)
{
    tcg_out_branch(s, 1, dest);
}

static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
{
    tcg_out_branch(s, 0, dest);
}

#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void *qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void *qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDRLO_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   Outputs:
   LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
   positions of the displacements of forward jumps to the TLB miss case.

   Second argument register is loaded with the low part of the address.
   In the TLB hit case, it has been adjusted as indicated by the TLB
   and so is a host address.  In the TLB miss case, it continues to
   hold a guest address.

   First argument register is clobbered.  */

static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
                                    int mem_index, int s_bits,
                                    const TCGArg *args,
                                    uint8_t **label_ptr, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_L0;
    const int r1 = TCG_REG_L1;
    TCGType type = TCG_TYPE_I32;
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
        type = TCG_TYPE_I64;
        rexw = P_REXW;
    }

    tcg_out_mov(s, type, r0, addrlo);
    tcg_out_mov(s, type, r1, addrlo);

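    /* Added note: r0 becomes the TLB index, the page number masked down to
       the table size and pre-scaled by the entry size.  r1 keeps the
       page-aligned address with the low alignment bits retained, so an
       unaligned access fails the compare below and takes the slow path.  */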
    tcg_out_shifti(s, SHIFT_SHR + rexw, r0,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND + rexw, r1,
                TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND + rexw, r0,
                (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r0, TCG_AREG0, r0, 0,
                             offsetof(CPUArchState, tlb_table[mem_index][0])
                             + which);

    /* cmp 0(r0), r1 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r1, r0, 0);

    tcg_out_mov(s, type, r1, addrlo);

    /* jne label1 */
    tcg_out8(s, OPC_JCC_short + JCC_JNE);
    label_ptr[0] = s->code_ptr;
    s->code_ptr++;

    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        /* cmp 4(r0), addrhi */
        tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r0, 4);

        /* jne label1 */
        tcg_out8(s, OPC_JCC_short + JCC_JNE);
        label_ptr[1] = s->code_ptr;
        s->code_ptr++;
    }

    /* TLB Hit.  */

    /* add addend(r0), r1 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r1, r0,
                         offsetof(CPUTLBEntry, addend) - which);
}
#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
# include <sys/prctl.h>

int arch_prctl(int code, unsigned long addr);

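/* Added note: for 64-bit user-only emulation on Linux, point the %gs base
   at GUEST_BASE so that a gs segment override performs the guest-base
   addition for free on every memory access.  */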
static int guest_base_flags;
static inline void setup_guest_base_seg(void)
{
    if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) {
        guest_base_flags = P_GS;
    }
}
#else
# define guest_base_flags 0
static inline void setup_guest_base_seg(void) { }
#endif /* SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
                                   int base, tcg_target_long ofs, int seg,
                                   int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 1;
#else
    const int bswap = 0;
#endif
    switch (sizeop) {
    case 0:
        tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs);
        break;
    case 0 | 4:
        tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs);
        break;
    case 1:
        tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
        if (bswap) {
            tcg_out_rolw_8(s, datalo);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
            tcg_out_rolw_8(s, datalo);
            tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg,
                                 datalo, base, ofs);
        }
        break;
    case 2:
        tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
        if (bswap) {
            tcg_out_bswap32(s, datalo);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case 2 | 4:
        if (bswap) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
            tcg_out_bswap32(s, datalo);
            tcg_out_ext32s(s, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs);
        }
        break;
#endif
    case 3:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW + seg,
                                 datalo, base, ofs);
            if (bswap) {
                tcg_out_bswap64(s, datalo);
            }
        } else {
            if (bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            if (base != datalo) {
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datalo, base, ofs);
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datahi, base, ofs + 4);
            } else {
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datahi, base, ofs + 4);
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datalo, base, ofs);
            }
            if (bswap) {
                tcg_out_bswap32(s, datalo);
                tcg_out_bswap32(s, datahi);
            }
        }
        break;
    default:
        tcg_abort();
    }
}

/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX.  That will be useful once fixed-register globals are less
   common.  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int data_reg, data_reg2 = 0;
    int addrlo_idx;
#if defined(CONFIG_SOFTMMU)
    int mem_index, s_bits;
#if TCG_TARGET_REG_BITS == 32
    int stack_adjust;
#endif
    uint8_t *label_ptr[3];
#endif

    data_reg = args[0];
    addrlo_idx = 1;
    if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
        data_reg2 = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
    s_bits = opc & 3;

    tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));

    /* TLB Hit.  */
    tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label_ptr[2] = s->code_ptr;
    s->code_ptr++;

    /* TLB Miss.  */

    /* label1: */
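    /* Added note: patch the 8-bit displacements of the forward jne branches
       emitted by tcg_out_tlb_load; each is relative to the byte after the
       displacement itself, hence the extra -1.  */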
    *label_ptr[0] = s->code_ptr - label_ptr[0] - 1;
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        *label_ptr[1] = s->code_ptr - label_ptr[1] - 1;
    }

    /* XXX: move this code to the end of the TB */
#if TCG_TARGET_REG_BITS == 32
    tcg_out_pushi(s, mem_index);
    stack_adjust = 4;
    if (TARGET_LONG_BITS == 64) {
        tcg_out_push(s, args[addrlo_idx + 1]);
        stack_adjust += 4;
    }
    tcg_out_push(s, args[addrlo_idx]);
    stack_adjust += 4;
    tcg_out_push(s, TCG_AREG0);
    stack_adjust += 4;
#else
    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
    /* The second argument is already loaded with addrlo.  */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], mem_index);
#endif

    tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);

#if TCG_TARGET_REG_BITS == 32
    if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
        /* Pop and discard.  This is 2 bytes smaller than the add.  */
        tcg_out_pop(s, TCG_REG_ECX);
    } else if (stack_adjust != 0) {
        tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
    }
#endif

    switch(opc) {
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
    case 0:
        tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
        break;
    case 1:
        tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
        break;
#if TCG_TARGET_REG_BITS == 64
    case 2 | 4:
        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
        break;
#endif
    case 3:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
        } else if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EDX);
        }
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    *label_ptr[2] = s->code_ptr - label_ptr[2] - 1;
#else
    {
        int32_t offset = GUEST_BASE;
        int base = args[addrlo_idx];
        int seg = 0;

        /* ??? We assume all operations have left us with register contents
           that are zero extended.  So far this appears to be true.  If we
           want to enforce this, we can either do an explicit zero-extension
           here, or (if GUEST_BASE == 0, or a segment register is in use)
           use the ADDR32 prefix.  For now, do nothing.  */
        if (GUEST_BASE && guest_base_flags) {
            seg = guest_base_flags;
            offset = 0;
        } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
            tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
            base = TCG_REG_L1;
            offset = 0;
        }

        tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, seg, opc);
    }
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
                                   int base, tcg_target_long ofs, int seg,
                                   int sizeop)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 1;
#else
    const int bswap = 0;
#endif
    /* ??? Ideally we wouldn't need a scratch register.  For user-only,
       we could perform the bswap twice to restore the original value
       instead of moving to the scratch.  But as it is, the L constraint
       means that TCG_REG_L0 is definitely free here.  */
    const int scratch = TCG_REG_L0;

    switch (sizeop) {
    case 0:
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
                             datalo, base, ofs);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_rolw_8(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16 + seg,
                             datalo, base, ofs);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 64) {
            if (bswap) {
                tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
                tcg_out_bswap64(s, scratch);
                datalo = scratch;
            }
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_REXW + seg,
                                 datalo, base, ofs);
        } else if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs);
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datahi, base, ofs+4);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int data_reg, data_reg2 = 0;
    int addrlo_idx;
#if defined(CONFIG_SOFTMMU)
    int mem_index, s_bits;
    int stack_adjust;
    uint8_t *label_ptr[3];
#endif

    data_reg = args[0];
    addrlo_idx = 1;
    if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
        data_reg2 = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
    s_bits = opc;

    tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
                     label_ptr, offsetof(CPUTLBEntry, addr_write));

    /* TLB Hit.  */
    tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);

    /* jmp label2 */
    tcg_out8(s, OPC_JMP_short);
    label_ptr[2] = s->code_ptr;
    s->code_ptr++;

    /* TLB Miss.  */

    /* label1: */
    *label_ptr[0] = s->code_ptr - label_ptr[0] - 1;
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        *label_ptr[1] = s->code_ptr - label_ptr[1] - 1;
    }

    /* XXX: move this code to the end of the TB */
#if TCG_TARGET_REG_BITS == 32
    tcg_out_pushi(s, mem_index);
    stack_adjust = 4;
    if (opc == 3) {
        tcg_out_push(s, data_reg2);
        stack_adjust += 4;
    }
    tcg_out_push(s, data_reg);
    stack_adjust += 4;
    if (TARGET_LONG_BITS == 64) {
        tcg_out_push(s, args[addrlo_idx + 1]);
        stack_adjust += 4;
    }
    tcg_out_push(s, args[addrlo_idx]);
    stack_adjust += 4;
    tcg_out_push(s, TCG_AREG0);
    stack_adjust += 4;
#else
    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
    /* The second argument is already loaded with addrlo.  */
    tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                tcg_target_call_iarg_regs[2], data_reg);
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], mem_index);
    stack_adjust = 0;
#endif

    tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);

    if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
        /* Pop and discard.  This is 2 bytes smaller than the add.  */
        tcg_out_pop(s, TCG_REG_ECX);
    } else if (stack_adjust != 0) {
        tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
    }

    /* label2: */
    *label_ptr[2] = s->code_ptr - label_ptr[2] - 1;
#else
    {
        int32_t offset = GUEST_BASE;
        int base = args[addrlo_idx];
        int seg = 0;

        /* ??? We assume all operations have left us with register contents
           that are zero extended.  So far this appears to be true.  If we
           want to enforce this, we can either do an explicit zero-extension
           here, or (if GUEST_BASE == 0, or a segment register is in use)
           use the ADDR32 prefix.  For now, do nothing.  */
        if (GUEST_BASE && guest_base_flags) {
            seg = guest_base_flags;
            offset = 0;
        } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
            tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
            base = TCG_REG_L1;
            offset = 0;
        }

        tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, seg, opc);
    }
#endif
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    int c, rexw = 0;

#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
            rexw = P_REXW; /* FALLTHRU */ \
        case glue(glue(INDEX_op_, x), _i32)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32)
#endif

    switch(opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
        tcg_out_jmp(s, (tcg_target_long) tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_calli(s, args[0]);
        } else {
            /* call *reg */
            tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0], 0);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    OP_32_64(ld8u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
        break;
    OP_32_64(ld8s):
        tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
        break;
    OP_32_64(ld16u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
        break;
    OP_32_64(ld16s):
        tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVB_EvIz,
                                 0, args[1], args[2]);
            tcg_out8(s, args[0]);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
                                 args[0], args[1], args[2]);
        }
        break;
    OP_32_64(st16):
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16,
                                 0, args[1], args[2]);
            tcg_out16(s, args[0]);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
                                 args[0], args[1], args[2]);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
    case INDEX_op_st_i32:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]);
            tcg_out32(s, args[0]);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        }
        break;

    OP_32_64(add):
        /* For 3-operand addition, use LEA.  */
        if (args[0] != args[1]) {
            TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;

            if (const_args[2]) {
                c3 = a2, a2 = -1;
            } else if (a0 == a2) {
                /* Watch out for dest = src + dest, since we've removed
                   the matching constraint on the add.  */
                tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
                break;
            }

            tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
            break;
        }
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    gen_arith:
        if (const_args[2]) {
            tgen_arithi(s, c + rexw, args[0], args[2], 0);
        } else {
            tgen_arithr(s, c + rexw, args[0], args[2]);
        }
        break;

    OP_32_64(mul):
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
        }
        break;

    OP_32_64(div2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
        break;
    OP_32_64(divu2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
        break;

    OP_32_64(shl):
        c = SHIFT_SHL;
        goto gen_shift;
    OP_32_64(shr):
        c = SHIFT_SHR;
        goto gen_shift;
    OP_32_64(sar):
        c = SHIFT_SAR;
        goto gen_shift;
    OP_32_64(rotl):
        c = SHIFT_ROL;
        goto gen_shift;
    OP_32_64(rotr):
        c = SHIFT_ROR;
        goto gen_shift;
    gen_shift:
        if (const_args[2]) {
            tcg_out_shifti(s, c + rexw, args[0], args[2]);
        } else {
            tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
                         args[3], 0);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond32(s, args[3], args[0], args[1],
                          args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond32(s, args[5], args[0], args[1],
                          args[2], const_args[2], args[3]);
        break;

    OP_32_64(bswap16):
        tcg_out_rolw_8(s, args[0]);
        break;
    OP_32_64(bswap32):
        tcg_out_bswap32(s, args[0]);
        break;

    OP_32_64(neg):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
        break;
    OP_32_64(not):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);
        break;

    OP_32_64(ext8s):
        tcg_out_ext8s(s, args[0], args[1], rexw);
        break;
    OP_32_64(ext16s):
        tcg_out_ext16s(s, args[0], args[1], rexw);
        break;
    OP_32_64(ext8u):
        tcg_out_ext8u(s, args[0], args[1]);
        break;
    OP_32_64(ext16u):
        tcg_out_ext16u(s, args[0], args[1]);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
        break;
    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC, args[1], args[5]);
        }
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB, args[0], args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB, args[1], args[5]);
        }
        break;
#else /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW,
                                 0, args[1], args[2]);
            tcg_out32(s, args[0]);
        } else {
            tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        }
        break;
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
                         args[3], 0);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond64(s, args[3], args[0], args[1],
                          args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond64(s, args[5], args[0], args[1],
                          args[2], const_args[2], args[3]);
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0]);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, args[0], args[1]);
        break;
#endif

    OP_32_64(deposit):
        if (args[3] == 0 && args[4] == 8) {
            /* load bits 0..7 */
            tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM,
                          args[2], args[0]);
        } else if (args[3] == 8 && args[4] == 8) {
            /* load bits 8..15 */
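            /* Added note: without a REX prefix, R/M register numbers 4..7
               name the high-byte registers %ah..%bh, so args[0] + 4
               addresses bits 8..15 of args[0].  The Q constraint keeps
               args[0] within %eax..%ebx, where this encoding is valid.  */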
            tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4);
        } else if (args[3] == 0 && args[4] == 16) {
            /* load bits 0..15 */
            tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]);
        } else {
            tcg_abort();
        }
        break;

    default:
        tcg_abort();
    }

#undef OP_32_64
}

static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "qi", "r" } },
    { INDEX_op_st16_i32, { "ri", "r" } },
    { INDEX_op_st_i32, { "ri", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },

    { INDEX_op_deposit_i32, { "Q", "0", "Q" } },
#if TCG_TARGET_HAS_movcond_i32
    { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },
#endif

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
#else
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "ri", "r" } },
    { INDEX_op_st16_i64, { "ri", "r" } },
    { INDEX_op_st32_i64, { "ri", "r" } },
    { INDEX_op_st_i64, { "re", "r" } },

    { INDEX_op_add_i64, { "r", "0", "re" } },
    { INDEX_op_mul_i64, { "r", "0", "re" } },
    { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "0", "re" } },
    { INDEX_op_and_i64, { "r", "0", "reZ" } },
    { INDEX_op_or_i64, { "r", "0", "re" } },
    { INDEX_op_xor_i64, { "r", "0", "re" } },

    { INDEX_op_shl_i64, { "r", "0", "ci" } },
    { INDEX_op_shr_i64, { "r", "0", "ci" } },
    { INDEX_op_sar_i64, { "r", "0", "ci" } },
    { INDEX_op_rotl_i64, { "r", "0", "ci" } },
    { INDEX_op_rotr_i64, { "r", "0", "ci" } },

    { INDEX_op_brcond_i64, { "r", "re" } },
    { INDEX_op_setcond_i64, { "r", "r", "re" } },

    { INDEX_op_bswap16_i64, { "r", "0" } },
    { INDEX_op_bswap32_i64, { "r", "0" } },
    { INDEX_op_bswap64_i64, { "r", "0" } },
    { INDEX_op_neg_i64, { "r", "0" } },
    { INDEX_op_not_i64, { "r", "0" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
    { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
#if defined(_WIN64)
    TCG_REG_RDI,
    TCG_REG_RSI,
#endif
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14, /* Currently used for the global env.  */
    TCG_REG_R15,
#else
    TCG_REG_EBP, /* Currently used for the global env.  */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
#endif
};

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE \
    ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
     * (TCG_TARGET_REG_BITS / 8))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))
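
/* For a rough sense of scale, assuming the usual tcg.h values
   TCG_STATIC_CALL_ARGS_SIZE == 128 and CPU_TEMP_BUF_NLONGS == 128:
   a 64-bit non-Windows host saves six registers, so
   PUSH_SIZE = (1 + 6) * 8 = 56 (the extra slot is the return address
   pushed by our caller), and
   FRAME_SIZE = (56 + 128 + 128 * 8 + 15) & ~15 = 1216.  */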

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, stack_addend;

    /* TB prologue */

    /* Reserve some stack space, also for TCG temps.  */
    stack_addend = FRAME_SIZE - PUSH_SIZE;
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Save all callee-saved registers.  */
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }

#if TCG_TARGET_REG_BITS == 32
    /* The 32-bit ABI passes env and tb on the stack: env is found
       above the registers just pushed and the return address.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
               (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb; the tb argument sits one slot above env, and the stack
       adjustment just made must be added back into its offset.  */
    tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
                         (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
                         + stack_addend);
#else
    /* On 64-bit hosts, env and tb arrive in the first two argument
       registers.  */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb.  */
    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
#endif

    /* TB epilogue: INDEX_op_exit_tb jumps back here.  */
    tb_ret_addr = s->code_ptr;

    tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);

    for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out_opc(s, OPC_RET, 0, 0, 0);

#if !defined(CONFIG_SOFTMMU)
    /* Try to set up a segment register to point to GUEST_BASE.  */
    if (GUEST_BASE) {
        setup_guest_base_seg();
    }
#endif
}
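
/* For illustration only: on a 64-bit Linux host the prologue above
   assembles to roughly the following (assuming FRAME_SIZE == 1216 as
   sketched earlier, hence stack_addend == 1160 == 0x488):

       push %rbp; push %rbx; push %r12; push %r13; push %r14; push %r15
       mov  %rdi,%r14        env -> TCG_AREG0
       sub  $0x488,%rsp      reserve stack_addend bytes
       jmp  *%rsi            jump into the translation block

   The epilogue at tb_ret_addr adds stack_addend back, pops the six
   registers in reverse order and returns.  */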

static void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* Fail safe: the TLB lookup code requires each CPUTLBEntry to be
       exactly 1 << CPU_TLB_ENTRY_BITS bytes.  */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
        tcg_abort();
    }
#endif

    if (TCG_TARGET_REG_BITS == 64) {
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
    } else {
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
    }

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
    if (TCG_TARGET_REG_BITS == 64) {
#if !defined(_WIN64)
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
#endif
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    }

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(x86_op_defs);
}

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[4];
    uint8_t reg_ofs[14];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;
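
/* In the FDE initializers below, FRAME_SIZE is encoded as a two-byte
   uleb128: the low seven bits with the continuation bit (0x80) set,
   followed by the remaining bits.  tcg_register_jit() asserts that
   FRAME_SIZE >> 14 == 0, so two bytes always suffice.  */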

#if !defined(__ELF__)
    /* Host machine without ELF.  */
#elif TCG_TARGET_REG_BITS == 64
#define ELF_HOST_MACHINE EM_X86_64
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 0x78,             /* sleb128 -8 */
    .cie.return_column = 16,

    .fde.len = sizeof(DebugFrameFDE) - 4, /* length after .len member */
    .fde.def_cfa = {
        12, 7,                          /* DW_CFA_def_cfa %rsp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde.reg_ofs = {
        0x90, 1,                        /* DW_CFA_offset, %rip, -8 */
        /* The following ordering must match tcg_target_callee_save_regs.  */
        0x86, 2,                        /* DW_CFA_offset, %rbp, -16 */
        0x83, 3,                        /* DW_CFA_offset, %rbx, -24 */
        0x8c, 4,                        /* DW_CFA_offset, %r12, -32 */
        0x8d, 5,                        /* DW_CFA_offset, %r13, -40 */
        0x8e, 6,                        /* DW_CFA_offset, %r14, -48 */
        0x8f, 7,                        /* DW_CFA_offset, %r15, -56 */
    }
};
#else
#define ELF_HOST_MACHINE EM_386
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 0x7c,             /* sleb128 -4 */
    .cie.return_column = 8,

    .fde.len = sizeof(DebugFrameFDE) - 4, /* length after .len member */
    .fde.def_cfa = {
        12, 4,                          /* DW_CFA_def_cfa %esp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde.reg_ofs = {
        0x88, 1,                        /* DW_CFA_offset, %eip, -4 */
        /* The following ordering must match tcg_target_callee_save_regs.  */
        0x85, 2,                        /* DW_CFA_offset, %ebp, -8 */
        0x83, 3,                        /* DW_CFA_offset, %ebx, -12 */
        0x86, 4,                        /* DW_CFA_offset, %esi, -16 */
        0x87, 5,                        /* DW_CFA_offset, %edi, -20 */
    }
};
#endif

#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
{
    /* We're expecting a 2 byte uleb128 encoded value.  */
    assert(FRAME_SIZE >> 14 == 0);

    debug_frame.fde.func_start = (tcg_target_long)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif