tcg/i386/tcg-target.c
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #ifndef NDEBUG
26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 #if TCG_TARGET_REG_BITS == 64
28 "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
29 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
30 #else
31 "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
32 #endif
33 };
34 #endif
35
36 static const int tcg_target_reg_alloc_order[] = {
37 #if TCG_TARGET_REG_BITS == 64
38 TCG_REG_RBP,
39 TCG_REG_RBX,
40 TCG_REG_R12,
41 TCG_REG_R13,
42 TCG_REG_R14,
43 TCG_REG_R15,
44 TCG_REG_R10,
45 TCG_REG_R11,
46 TCG_REG_R9,
47 TCG_REG_R8,
48 TCG_REG_RCX,
49 TCG_REG_RDX,
50 TCG_REG_RSI,
51 TCG_REG_RDI,
52 TCG_REG_RAX,
53 #else
54 TCG_REG_EBX,
55 TCG_REG_ESI,
56 TCG_REG_EDI,
57 TCG_REG_EBP,
58 TCG_REG_ECX,
59 TCG_REG_EDX,
60 TCG_REG_EAX,
61 #endif
62 };
63
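/* Incoming call arguments.  Win64 passes the first four integer arguments
   in %rcx, %rdx, %r8, %r9, while the SysV x86_64 ABI passes the first six
   in %rdi, %rsi, %rdx, %rcx, %r8, %r9; only the %r8/%r9 tail is shared. */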
64 static const int tcg_target_call_iarg_regs[] = {
65 #if TCG_TARGET_REG_BITS == 64
66 #if defined(_WIN64)
67 TCG_REG_RCX,
68 TCG_REG_RDX,
69 #else
70 TCG_REG_RDI,
71 TCG_REG_RSI,
72 TCG_REG_RDX,
73 TCG_REG_RCX,
74 #endif
75 TCG_REG_R8,
76 TCG_REG_R9,
77 #else
78 /* 32-bit mode uses the stack-based calling convention (GCC default). */
79 #endif
80 };
81
82 static const int tcg_target_call_oarg_regs[] = {
83 TCG_REG_EAX,
84 #if TCG_TARGET_REG_BITS == 32
85 TCG_REG_EDX
86 #endif
87 };
88
89 /* Registers used with the L constraint, which are the first argument
90 registers on x86_64, and two arbitrarily chosen call-clobbered
91 registers on i386. */
92 #if TCG_TARGET_REG_BITS == 64
93 # define TCG_REG_L0 tcg_target_call_iarg_regs[0]
94 # define TCG_REG_L1 tcg_target_call_iarg_regs[1]
95 #else
96 # define TCG_REG_L0 TCG_REG_EAX
97 # define TCG_REG_L1 TCG_REG_EDX
98 #endif
99
100 static uint8_t *tb_ret_addr;
101
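/* patch_reloc resolves relocations recorded with tcg_out_reloc.  tcg_out_jxx
   below records R_386_PC32 (addend -4) for long jumps and R_386_PC8
   (addend -1) for short jumps whose target label is not yet defined. */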
102 static void patch_reloc(uint8_t *code_ptr, int type,
103 tcg_target_long value, tcg_target_long addend)
104 {
105 value += addend;
106 switch(type) {
107 case R_386_PC32:
108 value -= (uintptr_t)code_ptr;
109 if (value != (int32_t)value) {
110 tcg_abort();
111 }
112 *(uint32_t *)code_ptr = value;
113 break;
114 case R_386_PC8:
115 value -= (uintptr_t)code_ptr;
116 if (value != (int8_t)value) {
117 tcg_abort();
118 }
119 *(uint8_t *)code_ptr = value;
120 break;
121 default:
122 tcg_abort();
123 }
124 }
125
126 /* parse target specific constraints */
127 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
128 {
129 const char *ct_str;
130
131 ct_str = *pct_str;
132 switch(ct_str[0]) {
133 case 'a':
134 ct->ct |= TCG_CT_REG;
135 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
136 break;
137 case 'b':
138 ct->ct |= TCG_CT_REG;
139 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
140 break;
141 case 'c':
142 ct->ct |= TCG_CT_REG;
143 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
144 break;
145 case 'd':
146 ct->ct |= TCG_CT_REG;
147 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
148 break;
149 case 'S':
150 ct->ct |= TCG_CT_REG;
151 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
152 break;
153 case 'D':
154 ct->ct |= TCG_CT_REG;
155 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
156 break;
157 case 'q':
158 ct->ct |= TCG_CT_REG;
159 if (TCG_TARGET_REG_BITS == 64) {
160 tcg_regset_set32(ct->u.regs, 0, 0xffff);
161 } else {
162 tcg_regset_set32(ct->u.regs, 0, 0xf);
163 }
164 break;
165 case 'Q':
166 ct->ct |= TCG_CT_REG;
167 tcg_regset_set32(ct->u.regs, 0, 0xf);
168 break;
169 case 'r':
170 ct->ct |= TCG_CT_REG;
171 if (TCG_TARGET_REG_BITS == 64) {
172 tcg_regset_set32(ct->u.regs, 0, 0xffff);
173 } else {
174 tcg_regset_set32(ct->u.regs, 0, 0xff);
175 }
176 break;
177
178 /* qemu_ld/st address constraint */
179 case 'L':
180 ct->ct |= TCG_CT_REG;
181 #if TCG_TARGET_REG_BITS == 64
182 tcg_regset_set32(ct->u.regs, 0, 0xffff);
183 #else
184 tcg_regset_set32(ct->u.regs, 0, 0xff);
185 #endif
186 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
187 tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
188 break;
189
190 case 'e':
191 ct->ct |= TCG_CT_CONST_S32;
192 break;
193 case 'Z':
194 ct->ct |= TCG_CT_CONST_U32;
195 break;
196
197 default:
198 return -1;
199 }
200 ct_str++;
201 *pct_str = ct_str;
202 return 0;
203 }
204
205 /* test if a constant matches the constraint */
206 static inline int tcg_target_const_match(tcg_target_long val,
207 const TCGArgConstraint *arg_ct)
208 {
209 int ct = arg_ct->ct;
210 if (ct & TCG_CT_CONST) {
211 return 1;
212 }
213 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
214 return 1;
215 }
216 if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
217 return 1;
218 }
219 return 0;
220 }
221
222 #if TCG_TARGET_REG_BITS == 64
223 # define LOWREGMASK(x) ((x) & 7)
224 #else
225 # define LOWREGMASK(x) (x)
226 #endif
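/* In 64-bit mode only the low three bits of a register number go into the
   ModRM/SIB byte; bit 3 travels in the REX.R/X/B prefix bits instead.
   For example %r13 (register 13) encodes as LOWREGMASK() == 5 plus REX.B. */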
227
228 #define P_EXT 0x100 /* 0x0f opcode prefix */
229 #define P_DATA16 0x200 /* 0x66 opcode prefix */
230 #if TCG_TARGET_REG_BITS == 64
231 # define P_ADDR32 0x400 /* 0x67 opcode prefix */
232 # define P_REXW 0x800 /* Set REX.W = 1 */
233 # define P_REXB_R 0x1000 /* REG field as byte register */
234 # define P_REXB_RM 0x2000 /* R/M field as byte register */
235 # define P_GS 0x4000 /* gs segment override */
236 #else
237 # define P_ADDR32 0
238 # define P_REXW 0
239 # define P_REXB_R 0
240 # define P_REXB_RM 0
241 # define P_GS 0
242 #endif
243
244 #define OPC_ARITH_EvIz (0x81)
245 #define OPC_ARITH_EvIb (0x83)
246 #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
247 #define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
248 #define OPC_BSWAP (0xc8 | P_EXT)
249 #define OPC_CALL_Jz (0xe8)
250 #define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */
251 #define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3))
252 #define OPC_DEC_r32 (0x48)
253 #define OPC_IMUL_GvEv (0xaf | P_EXT)
254 #define OPC_IMUL_GvEvIb (0x6b)
255 #define OPC_IMUL_GvEvIz (0x69)
256 #define OPC_INC_r32 (0x40)
257 #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */
258 #define OPC_JCC_short (0x70) /* ... plus condition code */
259 #define OPC_JMP_long (0xe9)
260 #define OPC_JMP_short (0xeb)
261 #define OPC_LEA (0x8d)
262 #define OPC_MOVB_EvGv (0x88) /* stores, more or less */
263 #define OPC_MOVL_EvGv (0x89) /* stores, more or less */
264 #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
265 #define OPC_MOVB_EvIz (0xc6)
266 #define OPC_MOVL_EvIz (0xc7)
267 #define OPC_MOVL_Iv (0xb8)
268 #define OPC_MOVSBL (0xbe | P_EXT)
269 #define OPC_MOVSWL (0xbf | P_EXT)
270 #define OPC_MOVSLQ (0x63 | P_REXW)
271 #define OPC_MOVZBL (0xb6 | P_EXT)
272 #define OPC_MOVZWL (0xb7 | P_EXT)
273 #define OPC_POP_r32 (0x58)
274 #define OPC_PUSH_r32 (0x50)
275 #define OPC_PUSH_Iv (0x68)
276 #define OPC_PUSH_Ib (0x6a)
277 #define OPC_RET (0xc3)
278 #define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
279 #define OPC_SHIFT_1 (0xd1)
280 #define OPC_SHIFT_Ib (0xc1)
281 #define OPC_SHIFT_cl (0xd3)
282 #define OPC_TESTL (0x85)
283 #define OPC_XCHG_ax_r32 (0x90)
284
285 #define OPC_GRP3_Ev (0xf7)
286 #define OPC_GRP5 (0xff)
287
288 /* Group 1 opcode extensions for 0x80-0x83.
289 These are also used as modifiers for OPC_ARITH. */
290 #define ARITH_ADD 0
291 #define ARITH_OR 1
292 #define ARITH_ADC 2
293 #define ARITH_SBB 3
294 #define ARITH_AND 4
295 #define ARITH_SUB 5
296 #define ARITH_XOR 6
297 #define ARITH_CMP 7
298
299 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */
300 #define SHIFT_ROL 0
301 #define SHIFT_ROR 1
302 #define SHIFT_SHL 4
303 #define SHIFT_SHR 5
304 #define SHIFT_SAR 7
305
306 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
307 #define EXT3_NOT 2
308 #define EXT3_NEG 3
309 #define EXT3_MUL 4
310 #define EXT3_IMUL 5
311 #define EXT3_DIV 6
312 #define EXT3_IDIV 7
313
314 /* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */
315 #define EXT5_INC_Ev 0
316 #define EXT5_DEC_Ev 1
317 #define EXT5_CALLN_Ev 2
318 #define EXT5_JMPN_Ev 4
319
320 /* Condition codes to be added to OPC_JCC_{long,short}. */
321 #define JCC_JMP (-1)
322 #define JCC_JO 0x0
323 #define JCC_JNO 0x1
324 #define JCC_JB 0x2
325 #define JCC_JAE 0x3
326 #define JCC_JE 0x4
327 #define JCC_JNE 0x5
328 #define JCC_JBE 0x6
329 #define JCC_JA 0x7
330 #define JCC_JS 0x8
331 #define JCC_JNS 0x9
332 #define JCC_JP 0xa
333 #define JCC_JNP 0xb
334 #define JCC_JL 0xc
335 #define JCC_JGE 0xd
336 #define JCC_JLE 0xe
337 #define JCC_JG 0xf
338
339 static const uint8_t tcg_cond_to_jcc[] = {
340 [TCG_COND_EQ] = JCC_JE,
341 [TCG_COND_NE] = JCC_JNE,
342 [TCG_COND_LT] = JCC_JL,
343 [TCG_COND_GE] = JCC_JGE,
344 [TCG_COND_LE] = JCC_JLE,
345 [TCG_COND_GT] = JCC_JG,
346 [TCG_COND_LTU] = JCC_JB,
347 [TCG_COND_GEU] = JCC_JAE,
348 [TCG_COND_LEU] = JCC_JBE,
349 [TCG_COND_GTU] = JCC_JA,
350 };
351
352 #if TCG_TARGET_REG_BITS == 64
353 static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
354 {
355 int rex;
356
357 if (opc & P_GS) {
358 tcg_out8(s, 0x65);
359 }
360 if (opc & P_DATA16) {
361 /* We should never be asking for both 16 and 64-bit operation. */
362 assert((opc & P_REXW) == 0);
363 tcg_out8(s, 0x66);
364 }
365 if (opc & P_ADDR32) {
366 tcg_out8(s, 0x67);
367 }
368
369 rex = 0;
370 rex |= (opc & P_REXW) >> 8; /* REX.W */
371 rex |= (r & 8) >> 1; /* REX.R */
372 rex |= (x & 8) >> 2; /* REX.X */
373 rex |= (rm & 8) >> 3; /* REX.B */
374
375 /* P_REXB_{R,RM} indicates that the given register is the low byte.
376 For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
377 as otherwise the encoding indicates %[abcd]h. Note that the values
378 that are ORed in merely indicate that the REX byte must be present;
379 those bits get discarded in output. */
380 rex |= opc & (r >= 4 ? P_REXB_R : 0);
381 rex |= opc & (rm >= 4 ? P_REXB_RM : 0);
382
383 if (rex) {
384 tcg_out8(s, (uint8_t)(rex | 0x40));
385 }
386
387 if (opc & P_EXT) {
388 tcg_out8(s, 0x0f);
389 }
390 tcg_out8(s, opc);
391 }
392 #else
393 static void tcg_out_opc(TCGContext *s, int opc)
394 {
395 if (opc & P_DATA16) {
396 tcg_out8(s, 0x66);
397 }
398 if (opc & P_EXT) {
399 tcg_out8(s, 0x0f);
400 }
401 tcg_out8(s, opc);
402 }
403 /* Discard the register arguments to tcg_out_opc early, so as not to penalize
404 the 32-bit compilation paths. This method works with all versions of gcc,
405 whereas relying on optimization may not be able to exclude them. */
406 #define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
407 #endif
408
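/* Illustrative sketch: tcg_out_modrm(s, OPC_ADD_GvEv + P_REXW, TCG_REG_RAX,
   TCG_REG_RBX) emits 48 03 c3, i.e. "addq %rbx, %rax"; without P_REXW and
   with low registers no REX byte is emitted at all. */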
409 static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
410 {
411 tcg_out_opc(s, opc, r, rm, 0);
412 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
413 }
414
415 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
416 Either RM or INDEX may be omitted by passing a negative value.  In 64-bit
417 mode for absolute addresses, ~RM is the size of the immediate operand
418 that will follow the instruction. */
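/* For example, the call in tcg_out_tlb_load below,
   tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r0, TCG_AREG0, r0, 0, ofs),
   computes r0 = env + r0 + ofs with a single LEA. */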
419
420 static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
421 int index, int shift,
422 tcg_target_long offset)
423 {
424 int mod, len;
425
426 if (index < 0 && rm < 0) {
427 if (TCG_TARGET_REG_BITS == 64) {
428 /* Try for a rip-relative addressing mode. This has replaced
429 the 32-bit-mode absolute addressing encoding. */
430 tcg_target_long pc = (tcg_target_long)s->code_ptr + 5 + ~rm;
431 tcg_target_long disp = offset - pc;
432 if (disp == (int32_t)disp) {
433 tcg_out_opc(s, opc, r, 0, 0);
434 tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
435 tcg_out32(s, disp);
436 return;
437 }
438
439 /* Try for an absolute address encoding. This requires the
440 use of the MODRM+SIB encoding and is therefore larger than
441 rip-relative addressing. */
442 if (offset == (int32_t)offset) {
443 tcg_out_opc(s, opc, r, 0, 0);
444 tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
445 tcg_out8(s, (4 << 3) | 5);
446 tcg_out32(s, offset);
447 return;
448 }
449
450 /* ??? The memory isn't directly addressable. */
451 tcg_abort();
452 } else {
453 /* Absolute address. */
454 tcg_out_opc(s, opc, r, 0, 0);
455 tcg_out8(s, (r << 3) | 5);
456 tcg_out32(s, offset);
457 return;
458 }
459 }
460
461 /* Find the length of the immediate addend. Note that the encoding
462 that would be used for (%ebp) indicates absolute addressing. */
463 if (rm < 0) {
464 mod = 0, len = 4, rm = 5;
465 } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
466 mod = 0, len = 0;
467 } else if (offset == (int8_t)offset) {
468 mod = 0x40, len = 1;
469 } else {
470 mod = 0x80, len = 4;
471 }
472
473 /* Use a single byte MODRM format if possible. Note that the encoding
474 that would be used for %esp is the escape to the two byte form. */
475 if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
476 /* Single byte MODRM format. */
477 tcg_out_opc(s, opc, r, rm, 0);
478 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
479 } else {
480 /* Two byte MODRM+SIB format. */
481
482 /* Note that the encoding that would place %esp into the index
483 field indicates no index register. In 64-bit mode, the REX.X
484 bit counts, so %r12 can be used as the index. */
485 if (index < 0) {
486 index = 4;
487 } else {
488 assert(index != TCG_REG_ESP);
489 }
490
491 tcg_out_opc(s, opc, r, rm, index);
492 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
493 tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
494 }
495
496 if (len == 1) {
497 tcg_out8(s, offset);
498 } else if (len == 4) {
499 tcg_out32(s, offset);
500 }
501 }
502
503 /* A simplification of the above with no index or shift. */
504 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
505 int rm, tcg_target_long offset)
506 {
507 tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
508 }
509
510 /* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */
511 static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
512 {
513 /* Propagate an opcode prefix, such as P_REXW. */
514 int ext = subop & ~0x7;
515 subop &= 0x7;
516
517 tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
518 }
519
520 static inline void tcg_out_mov(TCGContext *s, TCGType type,
521 TCGReg ret, TCGReg arg)
522 {
523 if (arg != ret) {
524 int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
525 tcg_out_modrm(s, opc, ret, arg);
526 }
527 }
528
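/* Rough encoding sizes chosen below, for a low register such as %rax:
   xor reg,reg (2 bytes) for zero; movl $imm32 (5 bytes, zero-extends on
   x86_64); movq with a sign-extended 32-bit immediate (7 bytes); and the
   full movabsq $imm64 (10 bytes) only when nothing shorter fits. */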
529 static void tcg_out_movi(TCGContext *s, TCGType type,
530 TCGReg ret, tcg_target_long arg)
531 {
532 if (arg == 0) {
533 tgen_arithr(s, ARITH_XOR, ret, ret);
534 return;
535 } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
536 tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
537 tcg_out32(s, arg);
538 } else if (arg == (int32_t)arg) {
539 tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
540 tcg_out32(s, arg);
541 } else {
542 tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
543 tcg_out32(s, arg);
544 tcg_out32(s, arg >> 31 >> 1);
545 }
546 }
547
548 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
549 {
550 if (val == (int8_t)val) {
551 tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
552 tcg_out8(s, val);
553 } else if (val == (int32_t)val) {
554 tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
555 tcg_out32(s, val);
556 } else {
557 tcg_abort();
558 }
559 }
560
561 static inline void tcg_out_push(TCGContext *s, int reg)
562 {
563 tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
564 }
565
566 static inline void tcg_out_pop(TCGContext *s, int reg)
567 {
568 tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
569 }
570
571 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
572 TCGReg arg1, tcg_target_long arg2)
573 {
574 int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
575 tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
576 }
577
578 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
579 TCGReg arg1, tcg_target_long arg2)
580 {
581 int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
582 tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
583 }
584
585 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
586 {
587 /* Propagate an opcode prefix, such as P_DATA16. */
588 int ext = subopc & ~0x7;
589 subopc &= 0x7;
590
591 if (count == 1) {
592 tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
593 } else {
594 tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
595 tcg_out8(s, count);
596 }
597 }
598
599 static inline void tcg_out_bswap32(TCGContext *s, int reg)
600 {
601 tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
602 }
603
604 static inline void tcg_out_rolw_8(TCGContext *s, int reg)
605 {
606 tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
607 }
608
609 static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
610 {
611 /* movzbl */
612 assert(src < 4 || TCG_TARGET_REG_BITS == 64);
613 tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
614 }
615
616 static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
617 {
618 /* movsbl */
619 assert(src < 4 || TCG_TARGET_REG_BITS == 64);
620 tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
621 }
622
623 static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
624 {
625 /* movzwl */
626 tcg_out_modrm(s, OPC_MOVZWL, dest, src);
627 }
628
629 static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
630 {
631 /* movsw[lq] */
632 tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
633 }
634
635 static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
636 {
637 /* 32-bit mov zero extends. */
638 tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
639 }
640
641 static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
642 {
643 tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
644 }
645
646 static inline void tcg_out_bswap64(TCGContext *s, int reg)
647 {
648 tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
649 }
650
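/* Emit "op $val, %r0" for one of the ARITH_* operations, preferring the
   shortest usable form: inc/dec, zero-extending moves for common AND masks,
   the sign-extended imm8 form, then a full imm32.  For example (a sketch),
   tgen_arithi(s, ARITH_SUB + P_REXW, TCG_REG_RAX, 16, 0) emits 48 83 e8 10,
   i.e. "subq $16, %rax". */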
651 static void tgen_arithi(TCGContext *s, int c, int r0,
652 tcg_target_long val, int cf)
653 {
654 int rexw = 0;
655
656 if (TCG_TARGET_REG_BITS == 64) {
657 rexw = c & -8;
658 c &= 7;
659 }
660
661 /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
662 partial flags update stalls on Pentium4 and are not recommended
663 by current Intel optimization manuals. */
664 if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
665 int is_inc = (c == ARITH_ADD) ^ (val < 0);
666 if (TCG_TARGET_REG_BITS == 64) {
667 /* The single-byte increment encodings are re-tasked as the
668 REX prefixes. Use the MODRM encoding. */
669 tcg_out_modrm(s, OPC_GRP5 + rexw,
670 (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
671 } else {
672 tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
673 }
674 return;
675 }
676
677 if (c == ARITH_AND) {
678 if (TCG_TARGET_REG_BITS == 64) {
679 if (val == 0xffffffffu) {
680 tcg_out_ext32u(s, r0, r0);
681 return;
682 }
683 if (val == (uint32_t)val) {
684 /* AND with no high bits set can use a 32-bit operation. */
685 rexw = 0;
686 }
687 }
688 if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
689 tcg_out_ext8u(s, r0, r0);
690 return;
691 }
692 if (val == 0xffffu) {
693 tcg_out_ext16u(s, r0, r0);
694 return;
695 }
696 }
697
698 if (val == (int8_t)val) {
699 tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
700 tcg_out8(s, val);
701 return;
702 }
703 if (rexw == 0 || val == (int32_t)val) {
704 tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
705 tcg_out32(s, val);
706 return;
707 }
708
709 tcg_abort();
710 }
711
712 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
713 {
714 if (val != 0) {
715 tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
716 }
717 }
718
719 /* Use SMALL != 0 to force a short forward branch. */
720 static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
721 {
722 int32_t val, val1;
723 TCGLabel *l = &s->labels[label_index];
724
725 if (l->has_value) {
726 val = l->u.value - (tcg_target_long)s->code_ptr;
727 val1 = val - 2;
728 if ((int8_t)val1 == val1) {
729 if (opc == -1) {
730 tcg_out8(s, OPC_JMP_short);
731 } else {
732 tcg_out8(s, OPC_JCC_short + opc);
733 }
734 tcg_out8(s, val1);
735 } else {
736 if (small) {
737 tcg_abort();
738 }
739 if (opc == -1) {
740 tcg_out8(s, OPC_JMP_long);
741 tcg_out32(s, val - 5);
742 } else {
743 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
744 tcg_out32(s, val - 6);
745 }
746 }
747 } else if (small) {
748 if (opc == -1) {
749 tcg_out8(s, OPC_JMP_short);
750 } else {
751 tcg_out8(s, OPC_JCC_short + opc);
752 }
753 tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
754 s->code_ptr += 1;
755 } else {
756 if (opc == -1) {
757 tcg_out8(s, OPC_JMP_long);
758 } else {
759 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
760 }
761 tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
762 s->code_ptr += 4;
763 }
764 }
765
766 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
767 int const_arg2, int rexw)
768 {
769 if (const_arg2) {
770 if (arg2 == 0) {
771 /* test r, r */
772 tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
773 } else {
774 tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
775 }
776 } else {
777 tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
778 }
779 }
780
781 static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
782 TCGArg arg1, TCGArg arg2, int const_arg2,
783 int label_index, int small)
784 {
785 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
786 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
787 }
788
789 #if TCG_TARGET_REG_BITS == 64
790 static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
791 TCGArg arg1, TCGArg arg2, int const_arg2,
792 int label_index, int small)
793 {
794 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
795 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
796 }
797 #else
798 /* XXX: we implement it at the target level to avoid having to
799 handle cross-basic-block temporaries. */
800 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
801 const int *const_args, int small)
802 {
803 int label_next;
804 label_next = gen_new_label();
805 switch(args[4]) {
806 case TCG_COND_EQ:
807 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
808 label_next, 1);
809 tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
810 args[5], small);
811 break;
812 case TCG_COND_NE:
813 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
814 args[5], small);
815 tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
816 args[5], small);
817 break;
818 case TCG_COND_LT:
819 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
820 args[5], small);
821 tcg_out_jxx(s, JCC_JNE, label_next, 1);
822 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
823 args[5], small);
824 break;
825 case TCG_COND_LE:
826 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
827 args[5], small);
828 tcg_out_jxx(s, JCC_JNE, label_next, 1);
829 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
830 args[5], small);
831 break;
832 case TCG_COND_GT:
833 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
834 args[5], small);
835 tcg_out_jxx(s, JCC_JNE, label_next, 1);
836 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
837 args[5], small);
838 break;
839 case TCG_COND_GE:
840 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
841 args[5], small);
842 tcg_out_jxx(s, JCC_JNE, label_next, 1);
843 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
844 args[5], small);
845 break;
846 case TCG_COND_LTU:
847 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
848 args[5], small);
849 tcg_out_jxx(s, JCC_JNE, label_next, 1);
850 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
851 args[5], small);
852 break;
853 case TCG_COND_LEU:
854 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
855 args[5], small);
856 tcg_out_jxx(s, JCC_JNE, label_next, 1);
857 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
858 args[5], small);
859 break;
860 case TCG_COND_GTU:
861 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
862 args[5], small);
863 tcg_out_jxx(s, JCC_JNE, label_next, 1);
864 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
865 args[5], small);
866 break;
867 case TCG_COND_GEU:
868 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
869 args[5], small);
870 tcg_out_jxx(s, JCC_JNE, label_next, 1);
871 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
872 args[5], small);
873 break;
874 default:
875 tcg_abort();
876 }
877 tcg_out_label(s, label_next, s->code_ptr);
878 }
879 #endif
880
881 static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
882 TCGArg arg1, TCGArg arg2, int const_arg2)
883 {
884 tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
885 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
886 tcg_out_ext8u(s, dest, dest);
887 }
888
889 #if TCG_TARGET_REG_BITS == 64
890 static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
891 TCGArg arg1, TCGArg arg2, int const_arg2)
892 {
893 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
894 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
895 tcg_out_ext8u(s, dest, dest);
896 }
897 #else
898 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
899 const int *const_args)
900 {
901 TCGArg new_args[6];
902 int label_true, label_over;
903
904 memcpy(new_args, args+1, 5*sizeof(TCGArg));
905
906 if (args[0] == args[1] || args[0] == args[2]
907 || (!const_args[3] && args[0] == args[3])
908 || (!const_args[4] && args[0] == args[4])) {
909 /* When the destination overlaps with one of the argument
910 registers, don't do anything tricky. */
911 label_true = gen_new_label();
912 label_over = gen_new_label();
913
914 new_args[5] = label_true;
915 tcg_out_brcond2(s, new_args, const_args+1, 1);
916
917 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
918 tcg_out_jxx(s, JCC_JMP, label_over, 1);
919 tcg_out_label(s, label_true, s->code_ptr);
920
921 tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
922 tcg_out_label(s, label_over, s->code_ptr);
923 } else {
924 /* When the destination does not overlap one of the arguments,
925 clear the destination first, jump if cond false, and emit an
926 increment in the true case. This results in smaller code. */
927
928 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
929
930 label_over = gen_new_label();
931 new_args[4] = tcg_invert_cond(new_args[4]);
932 new_args[5] = label_over;
933 tcg_out_brcond2(s, new_args, const_args+1, 1);
934
935 tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
936 tcg_out_label(s, label_over, s->code_ptr);
937 }
938 }
939 #endif
940
941 static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
942 TCGArg c1, TCGArg c2, int const_c2,
943 TCGArg v1)
944 {
945 tcg_out_cmp(s, c1, c2, const_c2, 0);
946 tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
947 }
948
949 #if TCG_TARGET_REG_BITS == 64
950 static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
951 TCGArg c1, TCGArg c2, int const_c2,
952 TCGArg v1)
953 {
954 tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
955 tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);
956 }
957 #endif
958
959 static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
960 {
961 tcg_target_long disp = dest - (tcg_target_long)s->code_ptr - 5;
962
963 if (disp == (int32_t)disp) {
964 tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
965 tcg_out32(s, disp);
966 } else {
967 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest);
968 tcg_out_modrm(s, OPC_GRP5,
969 call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
970 }
971 }
972
973 static inline void tcg_out_calli(TCGContext *s, tcg_target_long dest)
974 {
975 tcg_out_branch(s, 1, dest);
976 }
977
978 static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
979 {
980 tcg_out_branch(s, 0, dest);
981 }
982
983 #if defined(CONFIG_SOFTMMU)
984
985 #include "exec/softmmu_defs.h"
986
987 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
988 int mmu_idx) */
989 static const void *qemu_ld_helpers[4] = {
990 helper_ldb_mmu,
991 helper_ldw_mmu,
992 helper_ldl_mmu,
993 helper_ldq_mmu,
994 };
995
996 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
997 uintxx_t val, int mmu_idx) */
998 static const void *qemu_st_helpers[4] = {
999 helper_stb_mmu,
1000 helper_stw_mmu,
1001 helper_stl_mmu,
1002 helper_stq_mmu,
1003 };
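/* Both tables are indexed by s_bits, the log2 of the access size
   (0 = byte, 1 = word, 2 = long, 3 = quad). */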
1004
1005 static void add_qemu_ldst_label(TCGContext *s,
1006 int is_ld,
1007 int opc,
1008 int data_reg,
1009 int data_reg2,
1010 int addrlo_reg,
1011 int addrhi_reg,
1012 int mem_index,
1013 uint8_t *raddr,
1014 uint8_t **label_ptr);
1015
1016 /* Perform the TLB load and compare.
1017
1018 Inputs:
1019 ADDRLO_IDX contains the index into ARGS of the low part of the
1020 address; the high part of the address is at ADDRLO_IDX+1.
1021
1022 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1023
1024 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1025 This should be offsetof addr_read or addr_write.
1026
1027 Outputs:
1028 LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
1029 positions of the displacements of forward jumps to the TLB miss case.
1030
1031 Second argument register is loaded with the low part of the address.
1032 In the TLB hit case, it has been adjusted as indicated by the TLB
1033 and so is a host address. In the TLB miss case, it continues to
1034 hold a guest address.
1035
1036 First argument register is clobbered. */
1037
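/* Roughly, the fast path emitted below looks like this (a sketch, AT&T
   syntax, 64-bit host, "which" selecting addr_read or addr_write):

       mov   addrlo, r0
       mov   addrlo, r1
       shr   $(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), r0
       and   $(TARGET_PAGE_MASK | ((1 << s_bits) - 1)), r1
       and   $((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), r0
       lea   tlb_table[mem_index][0].which(env, r0), r0
       cmp   (r0), r1
       mov   addrlo, r1
       jne   slow_path              # plus a second cmp/jne for addrhi
       add   addend_offset(r0), r1  # r1 is now the host address
*/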
1038 static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
1039 int mem_index, int s_bits,
1040 const TCGArg *args,
1041 uint8_t **label_ptr, int which)
1042 {
1043 const int addrlo = args[addrlo_idx];
1044 const int r0 = TCG_REG_L0;
1045 const int r1 = TCG_REG_L1;
1046 TCGType type = TCG_TYPE_I32;
1047 int rexw = 0;
1048
1049 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
1050 type = TCG_TYPE_I64;
1051 rexw = P_REXW;
1052 }
1053
1054 tcg_out_mov(s, type, r0, addrlo);
1055 tcg_out_mov(s, type, r1, addrlo);
1056
1057 tcg_out_shifti(s, SHIFT_SHR + rexw, r0,
1058 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1059
1060 tgen_arithi(s, ARITH_AND + rexw, r1,
1061 TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
1062 tgen_arithi(s, ARITH_AND + rexw, r0,
1063 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
1064
1065 tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r0, TCG_AREG0, r0, 0,
1066 offsetof(CPUArchState, tlb_table[mem_index][0])
1067 + which);
1068
1069 /* cmp 0(r0), r1 */
1070 tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r1, r0, 0);
1071
1072 tcg_out_mov(s, type, r1, addrlo);
1073
1074 /* jne slow_path */
1075 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1076 label_ptr[0] = s->code_ptr;
1077 s->code_ptr += 4;
1078
1079 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1080 /* cmp 4(r0), addrhi */
1081 tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r0, 4);
1082
1083 /* jne slow_path */
1084 tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1085 label_ptr[1] = s->code_ptr;
1086 s->code_ptr += 4;
1087 }
1088
1089 /* TLB Hit. */
1090
1091 /* add addend(r0), r1 */
1092 tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r1, r0,
1093 offsetof(CPUTLBEntry, addend) - which);
1094 }
1095 #elif defined(__x86_64__) && defined(__linux__)
1096 # include <asm/prctl.h>
1097 # include <sys/prctl.h>
1098
1099 int arch_prctl(int code, unsigned long addr);
1100
1101 static int guest_base_flags;
1102 static inline void setup_guest_base_seg(void)
1103 {
1104 if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) {
1105 guest_base_flags = P_GS;
1106 }
1107 }
1108 #else
1109 # define guest_base_flags 0
1110 static inline void setup_guest_base_seg(void) { }
1111 #endif /* SOFTMMU */
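/* In the user-mode (non-SOFTMMU) configuration above, a nonzero GUEST_BASE
   can be folded into the %gs segment base via arch_prctl(ARCH_SET_GS, ...);
   the SEG argument of the *_direct routines below is then P_GS (a 0x65
   prefix) instead of adding GUEST_BASE explicitly. */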
1112
1113 static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
1114 int base, tcg_target_long ofs, int seg,
1115 int sizeop)
1116 {
1117 #ifdef TARGET_WORDS_BIGENDIAN
1118 const int bswap = 1;
1119 #else
1120 const int bswap = 0;
1121 #endif
1122 switch (sizeop) {
1123 case 0:
1124 tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs);
1125 break;
1126 case 0 | 4:
1127 tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs);
1128 break;
1129 case 1:
1130 tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
1131 if (bswap) {
1132 tcg_out_rolw_8(s, datalo);
1133 }
1134 break;
1135 case 1 | 4:
1136 if (bswap) {
1137 tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
1138 tcg_out_rolw_8(s, datalo);
1139 tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
1140 } else {
1141 tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg,
1142 datalo, base, ofs);
1143 }
1144 break;
1145 case 2:
1146 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
1147 if (bswap) {
1148 tcg_out_bswap32(s, datalo);
1149 }
1150 break;
1151 #if TCG_TARGET_REG_BITS == 64
1152 case 2 | 4:
1153 if (bswap) {
1154 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
1155 tcg_out_bswap32(s, datalo);
1156 tcg_out_ext32s(s, datalo, datalo);
1157 } else {
1158 tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs);
1159 }
1160 break;
1161 #endif
1162 case 3:
1163 if (TCG_TARGET_REG_BITS == 64) {
1164 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW + seg,
1165 datalo, base, ofs);
1166 if (bswap) {
1167 tcg_out_bswap64(s, datalo);
1168 }
1169 } else {
1170 if (bswap) {
1171 int t = datalo;
1172 datalo = datahi;
1173 datahi = t;
1174 }
1175 if (base != datalo) {
1176 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
1177 datalo, base, ofs);
1178 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
1179 datahi, base, ofs + 4);
1180 } else {
1181 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
1182 datahi, base, ofs + 4);
1183 tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
1184 datalo, base, ofs);
1185 }
1186 if (bswap) {
1187 tcg_out_bswap32(s, datalo);
1188 tcg_out_bswap32(s, datahi);
1189 }
1190 }
1191 break;
1192 default:
1193 tcg_abort();
1194 }
1195 }
1196
1197 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
1198 EAX.  This would be useful once fixed-register globals are less
1199 common. */
1200 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
1201 int opc)
1202 {
1203 int data_reg, data_reg2 = 0;
1204 int addrlo_idx;
1205 #if defined(CONFIG_SOFTMMU)
1206 int mem_index, s_bits;
1207 uint8_t *label_ptr[2];
1208 #endif
1209
1210 data_reg = args[0];
1211 addrlo_idx = 1;
1212 if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
1213 data_reg2 = args[1];
1214 addrlo_idx = 2;
1215 }
1216
1217 #if defined(CONFIG_SOFTMMU)
1218 mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
1219 s_bits = opc & 3;
1220
1221 tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
1222 label_ptr, offsetof(CPUTLBEntry, addr_read));
1223
1224 /* TLB Hit. */
1225 tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
1226
1227 /* Record the current context of a load into ldst label */
1228 add_qemu_ldst_label(s,
1229 1,
1230 opc,
1231 data_reg,
1232 data_reg2,
1233 args[addrlo_idx],
1234 args[addrlo_idx + 1],
1235 mem_index,
1236 s->code_ptr,
1237 label_ptr);
1238 #else
1239 {
1240 int32_t offset = GUEST_BASE;
1241 int base = args[addrlo_idx];
1242 int seg = 0;
1243
1244 /* ??? We assume all operations have left us with register contents
1245 that are zero extended. So far this appears to be true. If we
1246 want to enforce this, we can either do an explicit zero-extension
1247 here, or (if GUEST_BASE == 0, or a segment register is in use)
1248 use the ADDR32 prefix. For now, do nothing. */
1249 if (GUEST_BASE && guest_base_flags) {
1250 seg = guest_base_flags;
1251 offset = 0;
1252 } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
1253 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
1254 tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
1255 base = TCG_REG_L1;
1256 offset = 0;
1257 }
1258
1259 tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, seg, opc);
1260 }
1261 #endif
1262 }
1263
1264 static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
1265 int base, tcg_target_long ofs, int seg,
1266 int sizeop)
1267 {
1268 #ifdef TARGET_WORDS_BIGENDIAN
1269 const int bswap = 1;
1270 #else
1271 const int bswap = 0;
1272 #endif
1273 /* ??? Ideally we wouldn't need a scratch register. For user-only,
1274 we could perform the bswap twice to restore the original value
1275 instead of moving to the scratch. But as it is, the L constraint
1276 means that TCG_REG_L0 is definitely free here. */
1277 const int scratch = TCG_REG_L0;
1278
1279 switch (sizeop) {
1280 case 0:
1281 tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
1282 datalo, base, ofs);
1283 break;
1284 case 1:
1285 if (bswap) {
1286 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
1287 tcg_out_rolw_8(s, scratch);
1288 datalo = scratch;
1289 }
1290 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16 + seg,
1291 datalo, base, ofs);
1292 break;
1293 case 2:
1294 if (bswap) {
1295 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
1296 tcg_out_bswap32(s, scratch);
1297 datalo = scratch;
1298 }
1299 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
1300 break;
1301 case 3:
1302 if (TCG_TARGET_REG_BITS == 64) {
1303 if (bswap) {
1304 tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
1305 tcg_out_bswap64(s, scratch);
1306 datalo = scratch;
1307 }
1308 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_REXW + seg,
1309 datalo, base, ofs);
1310 } else if (bswap) {
1311 tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
1312 tcg_out_bswap32(s, scratch);
1313 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs);
1314 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
1315 tcg_out_bswap32(s, scratch);
1316 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
1317 } else {
1318 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
1319 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datahi, base, ofs+4);
1320 }
1321 break;
1322 default:
1323 tcg_abort();
1324 }
1325 }
1326
1327 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
1328 int opc)
1329 {
1330 int data_reg, data_reg2 = 0;
1331 int addrlo_idx;
1332 #if defined(CONFIG_SOFTMMU)
1333 int mem_index, s_bits;
1334 uint8_t *label_ptr[2];
1335 #endif
1336
1337 data_reg = args[0];
1338 addrlo_idx = 1;
1339 if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
1340 data_reg2 = args[1];
1341 addrlo_idx = 2;
1342 }
1343
1344 #if defined(CONFIG_SOFTMMU)
1345 mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
1346 s_bits = opc;
1347
1348 tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
1349 label_ptr, offsetof(CPUTLBEntry, addr_write));
1350
1351 /* TLB Hit. */
1352 tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_L1, 0, 0, opc);
1353
1354 /* Record the current context of a store into ldst label */
1355 add_qemu_ldst_label(s,
1356 0,
1357 opc,
1358 data_reg,
1359 data_reg2,
1360 args[addrlo_idx],
1361 args[addrlo_idx + 1],
1362 mem_index,
1363 s->code_ptr,
1364 label_ptr);
1365 #else
1366 {
1367 int32_t offset = GUEST_BASE;
1368 int base = args[addrlo_idx];
1369 int seg = 0;
1370
1371 /* ??? We assume all operations have left us with register contents
1372 that are zero extended. So far this appears to be true. If we
1373 want to enforce this, we can either do an explicit zero-extension
1374 here, or (if GUEST_BASE == 0, or a segment register is in use)
1375 use the ADDR32 prefix. For now, do nothing. */
1376 if (GUEST_BASE && guest_base_flags) {
1377 seg = guest_base_flags;
1378 offset = 0;
1379 } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
1380 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
1381 tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
1382 base = TCG_REG_L1;
1383 offset = 0;
1384 }
1385
1386 tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, seg, opc);
1387 }
1388 #endif
1389 }
1390
1391 #if defined(CONFIG_SOFTMMU)
1392 /*
1393 * Record the context of a call to the out of line helper code for the slow path
1394 * for a load or store, so that we can later generate the correct helper code
1395 */
1396 static void add_qemu_ldst_label(TCGContext *s,
1397 int is_ld,
1398 int opc,
1399 int data_reg,
1400 int data_reg2,
1401 int addrlo_reg,
1402 int addrhi_reg,
1403 int mem_index,
1404 uint8_t *raddr,
1405 uint8_t **label_ptr)
1406 {
1407 int idx;
1408 TCGLabelQemuLdst *label;
1409
1410 if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
1411 tcg_abort();
1412 }
1413
1414 idx = s->nb_qemu_ldst_labels++;
1415 label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
1416 label->is_ld = is_ld;
1417 label->opc = opc;
1418 label->datalo_reg = data_reg;
1419 label->datahi_reg = data_reg2;
1420 label->addrlo_reg = addrlo_reg;
1421 label->addrhi_reg = addrhi_reg;
1422 label->mem_index = mem_index;
1423 label->raddr = raddr;
1424 label->label_ptr[0] = label_ptr[0];
1425 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1426 label->label_ptr[1] = label_ptr[1];
1427 }
1428 }
1429
1430 /*
1431 * Generate code for the slow path for a load at the end of the block
1432 */
1433 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
1434 {
1435 int s_bits;
1436 int opc = label->opc;
1437 int mem_index = label->mem_index;
1438 #if TCG_TARGET_REG_BITS == 32
1439 int stack_adjust;
1440 int addrlo_reg = label->addrlo_reg;
1441 int addrhi_reg = label->addrhi_reg;
1442 #endif
1443 int data_reg = label->datalo_reg;
1444 int data_reg2 = label->datahi_reg;
1445 uint8_t *raddr = label->raddr;
1446 uint8_t **label_ptr = &label->label_ptr[0];
1447
1448 s_bits = opc & 3;
1449
1450 /* resolve label address */
1451 *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
1452 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1453 *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
1454 }
1455
1456 #if TCG_TARGET_REG_BITS == 32
1457 tcg_out_pushi(s, mem_index);
1458 stack_adjust = 4;
1459 if (TARGET_LONG_BITS == 64) {
1460 tcg_out_push(s, addrhi_reg);
1461 stack_adjust += 4;
1462 }
1463 tcg_out_push(s, addrlo_reg);
1464 stack_adjust += 4;
1465 tcg_out_push(s, TCG_AREG0);
1466 stack_adjust += 4;
1467 #else
1468 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
1469 /* The second argument is already loaded with addrlo. */
1470 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], mem_index);
1471 #endif
1472
1473 /* Code generation of qemu_ld/st's slow path calling MMU helper
1474
1475 PRE_PROC ...
1476 call MMU helper
1477 jmp POST_PROC (2b) : short forward jump <- GETRA()
1478 jmp next_code (5b) : dummy long backward jump which is never executed
1479 POST_PROC ... : do post-processing <- GETRA() + 7
1480 jmp next_code : jump to the code corresponding to next IR of qemu_ld/st
1481 */
1482
1483 tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]);
1484
1485 /* Jump to post-processing code */
1486 tcg_out8(s, OPC_JMP_short);
1487 tcg_out8(s, 5);
1488 /* Dummy backward jump encoding the fast path's pc for the MMU helpers */
1489 tcg_out8(s, OPC_JMP_long);
1490 *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
1491 s->code_ptr += 4;
1492
1493 #if TCG_TARGET_REG_BITS == 32
1494 if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
1495 /* Pop and discard. This is 2 bytes smaller than the add. */
1496 tcg_out_pop(s, TCG_REG_ECX);
1497 } else if (stack_adjust != 0) {
1498 tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
1499 }
1500 #endif
1501
1502 switch(opc) {
1503 case 0 | 4:
1504 tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
1505 break;
1506 case 1 | 4:
1507 tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
1508 break;
1509 case 0:
1510 tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
1511 break;
1512 case 1:
1513 tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
1514 break;
1515 case 2:
1516 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1517 break;
1518 #if TCG_TARGET_REG_BITS == 64
1519 case 2 | 4:
1520 tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
1521 break;
1522 #endif
1523 case 3:
1524 if (TCG_TARGET_REG_BITS == 64) {
1525 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
1526 } else if (data_reg == TCG_REG_EDX) {
1527 /* xchg %edx, %eax */
1528 tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
1529 tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EAX);
1530 } else {
1531 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1532 tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EDX);
1533 }
1534 break;
1535 default:
1536 tcg_abort();
1537 }
1538
1539 /* Jump to the code corresponding to the next IR of qemu_ld */
1540 tcg_out_jmp(s, (tcg_target_long)raddr);
1541 }
1542
1543 /*
1544 * Generate code for the slow path for a store at the end of the block
1545 */
1546 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
1547 {
1548 int s_bits;
1549 int stack_adjust;
1550 int opc = label->opc;
1551 int mem_index = label->mem_index;
1552 int data_reg = label->datalo_reg;
1553 #if TCG_TARGET_REG_BITS == 32
1554 int data_reg2 = label->datahi_reg;
1555 int addrlo_reg = label->addrlo_reg;
1556 int addrhi_reg = label->addrhi_reg;
1557 #endif
1558 uint8_t *raddr = label->raddr;
1559 uint8_t **label_ptr = &label->label_ptr[0];
1560
1561 s_bits = opc & 3;
1562
1563 /* resolve label address */
1564 *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
1565 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1566 *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
1567 }
1568
1569 #if TCG_TARGET_REG_BITS == 32
1570 tcg_out_pushi(s, mem_index);
1571 stack_adjust = 4;
1572 if (opc == 3) {
1573 tcg_out_push(s, data_reg2);
1574 stack_adjust += 4;
1575 }
1576 tcg_out_push(s, data_reg);
1577 stack_adjust += 4;
1578 if (TARGET_LONG_BITS == 64) {
1579 tcg_out_push(s, addrhi_reg);
1580 stack_adjust += 4;
1581 }
1582 tcg_out_push(s, addrlo_reg);
1583 stack_adjust += 4;
1584 tcg_out_push(s, TCG_AREG0);
1585 stack_adjust += 4;
1586 #else
1587 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
1588 /* The second argument is already loaded with addrlo. */
1589 tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1590 tcg_target_call_iarg_regs[2], data_reg);
1591 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], mem_index);
1592 stack_adjust = 0;
1593 #endif
1594
1595 /* Code generation of qemu_ld/st's slow path calling MMU helper
1596
1597 PRE_PROC ...
1598 call MMU helper
1599 jmp POST_PROC (2b) : short forward jump <- GETRA()
1600 jmp next_code (5b) : dummy long backward jump which is never executed
1601 POST_PROC ... : do post-processing <- GETRA() + 7
1602 jmp next_code : jump to the code corresponding to next IR of qemu_ld/st
1603 */
1604
1605 tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
1606
1607 /* Jump to post-processing code */
1608 tcg_out8(s, OPC_JMP_short);
1609 tcg_out8(s, 5);
1610 /* Dummy backward jump encoding the fast path's pc for the MMU helpers */
1611 tcg_out8(s, OPC_JMP_long);
1612 *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
1613 s->code_ptr += 4;
1614
1615 if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
1616 /* Pop and discard. This is 2 bytes smaller than the add. */
1617 tcg_out_pop(s, TCG_REG_ECX);
1618 } else if (stack_adjust != 0) {
1619 tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
1620 }
1621
1622 /* Jump to the code corresponding to the next IR of qemu_st */
1623 tcg_out_jmp(s, (tcg_target_long)raddr);
1624 }
1625
1626 /*
1627 * Generate TB finalization at the end of the block
1628 */
1629 void tcg_out_tb_finalize(TCGContext *s)
1630 {
1631 int i;
1632 TCGLabelQemuLdst *label;
1633
1634 /* qemu_ld/st slow paths */
1635 for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
1636 label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[i];
1637 if (label->is_ld) {
1638 tcg_out_qemu_ld_slow_path(s, label);
1639 } else {
1640 tcg_out_qemu_st_slow_path(s, label);
1641 }
1642 }
1643 }
1644 #endif /* CONFIG_SOFTMMU */
1645
1646 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1647 const TCGArg *args, const int *const_args)
1648 {
1649 int c, rexw = 0;
1650
1651 #if TCG_TARGET_REG_BITS == 64
1652 # define OP_32_64(x) \
1653 case glue(glue(INDEX_op_, x), _i64): \
1654 rexw = P_REXW; /* FALLTHRU */ \
1655 case glue(glue(INDEX_op_, x), _i32)
1656 #else
1657 # define OP_32_64(x) \
1658 case glue(glue(INDEX_op_, x), _i32)
1659 #endif
1660
1661 switch(opc) {
1662 case INDEX_op_exit_tb:
1663 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
1664 tcg_out_jmp(s, (tcg_target_long) tb_ret_addr);
1665 break;
1666 case INDEX_op_goto_tb:
1667 if (s->tb_jmp_offset) {
1668 /* direct jump method */
1669 tcg_out8(s, OPC_JMP_long); /* jmp im */
1670 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1671 tcg_out32(s, 0);
1672 } else {
1673 /* indirect jump method */
1674 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
1675 (tcg_target_long)(s->tb_next + args[0]));
1676 }
1677 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1678 break;
1679 case INDEX_op_call:
1680 if (const_args[0]) {
1681 tcg_out_calli(s, args[0]);
1682 } else {
1683 /* call *reg */
1684 tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);
1685 }
1686 break;
1687 case INDEX_op_br:
1688 tcg_out_jxx(s, JCC_JMP, args[0], 0);
1689 break;
1690 case INDEX_op_movi_i32:
1691 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1692 break;
1693 OP_32_64(ld8u):
1694 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1695 tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
1696 break;
1697 OP_32_64(ld8s):
1698 tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
1699 break;
1700 OP_32_64(ld16u):
1701 /* Note that we can ignore REXW for the zero-extend to 64-bit. */
1702 tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
1703 break;
1704 OP_32_64(ld16s):
1705 tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
1706 break;
1707 #if TCG_TARGET_REG_BITS == 64
1708 case INDEX_op_ld32u_i64:
1709 #endif
1710 case INDEX_op_ld_i32:
1711 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1712 break;
1713
1714 OP_32_64(st8):
1715 if (const_args[0]) {
1716 tcg_out_modrm_offset(s, OPC_MOVB_EvIz,
1717 0, args[1], args[2]);
1718 tcg_out8(s, args[0]);
1719 } else {
1720 tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
1721 args[0], args[1], args[2]);
1722 }
1723 break;
1724 OP_32_64(st16):
1725 if (const_args[0]) {
1726 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16,
1727 0, args[1], args[2]);
1728 tcg_out16(s, args[0]);
1729 } else {
1730 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
1731 args[0], args[1], args[2]);
1732 }
1733 break;
1734 #if TCG_TARGET_REG_BITS == 64
1735 case INDEX_op_st32_i64:
1736 #endif
1737 case INDEX_op_st_i32:
1738 if (const_args[0]) {
1739 tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]);
1740 tcg_out32(s, args[0]);
1741 } else {
1742 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1743 }
1744 break;
1745
1746 OP_32_64(add):
1747 /* For 3-operand addition, use LEA. */
1748 if (args[0] != args[1]) {
1749 TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;
1750
1751 if (const_args[2]) {
1752 c3 = a2, a2 = -1;
1753 } else if (a0 == a2) {
1754 /* Watch out for dest = src + dest, since we've removed
1755 the matching constraint on the add. */
1756 tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
1757 break;
1758 }
1759
1760 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
1761 break;
1762 }
1763 c = ARITH_ADD;
1764 goto gen_arith;
1765 OP_32_64(sub):
1766 c = ARITH_SUB;
1767 goto gen_arith;
1768 OP_32_64(and):
1769 c = ARITH_AND;
1770 goto gen_arith;
1771 OP_32_64(or):
1772 c = ARITH_OR;
1773 goto gen_arith;
1774 OP_32_64(xor):
1775 c = ARITH_XOR;
1776 goto gen_arith;
1777 gen_arith:
1778 if (const_args[2]) {
1779 tgen_arithi(s, c + rexw, args[0], args[2], 0);
1780 } else {
1781 tgen_arithr(s, c + rexw, args[0], args[2]);
1782 }
1783 break;
1784
1785 OP_32_64(mul):
1786 if (const_args[2]) {
1787 int32_t val;
1788 val = args[2];
1789 if (val == (int8_t)val) {
1790 tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
1791 tcg_out8(s, val);
1792 } else {
1793 tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
1794 tcg_out32(s, val);
1795 }
1796 } else {
1797 tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
1798 }
1799 break;
1800
1801 OP_32_64(div2):
1802 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
1803 break;
1804 OP_32_64(divu2):
1805 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
1806 break;
1807
1808 OP_32_64(shl):
1809 c = SHIFT_SHL;
1810 goto gen_shift;
1811 OP_32_64(shr):
1812 c = SHIFT_SHR;
1813 goto gen_shift;
1814 OP_32_64(sar):
1815 c = SHIFT_SAR;
1816 goto gen_shift;
1817 OP_32_64(rotl):
1818 c = SHIFT_ROL;
1819 goto gen_shift;
1820 OP_32_64(rotr):
1821 c = SHIFT_ROR;
1822 goto gen_shift;
1823 gen_shift:
1824 if (const_args[2]) {
1825 tcg_out_shifti(s, c + rexw, args[0], args[2]);
1826 } else {
1827 tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);
1828 }
1829 break;
1830
1831 case INDEX_op_brcond_i32:
1832 tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
1833 args[3], 0);
1834 break;
1835 case INDEX_op_setcond_i32:
1836 tcg_out_setcond32(s, args[3], args[0], args[1],
1837 args[2], const_args[2]);
1838 break;
1839 case INDEX_op_movcond_i32:
1840 tcg_out_movcond32(s, args[5], args[0], args[1],
1841 args[2], const_args[2], args[3]);
1842 break;
1843
1844 OP_32_64(bswap16):
1845 tcg_out_rolw_8(s, args[0]);
1846 break;
1847 OP_32_64(bswap32):
1848 tcg_out_bswap32(s, args[0]);
1849 break;
1850
1851 OP_32_64(neg):
1852 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
1853 break;
1854 OP_32_64(not):
1855 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);
1856 break;
1857
1858 OP_32_64(ext8s):
1859 tcg_out_ext8s(s, args[0], args[1], rexw);
1860 break;
1861 OP_32_64(ext16s):
1862 tcg_out_ext16s(s, args[0], args[1], rexw);
1863 break;
1864 OP_32_64(ext8u):
1865 tcg_out_ext8u(s, args[0], args[1]);
1866 break;
1867 OP_32_64(ext16u):
1868 tcg_out_ext16u(s, args[0], args[1]);
1869 break;
1870
1871 case INDEX_op_qemu_ld8u:
1872 tcg_out_qemu_ld(s, args, 0);
1873 break;
1874 case INDEX_op_qemu_ld8s:
1875 tcg_out_qemu_ld(s, args, 0 | 4);
1876 break;
1877 case INDEX_op_qemu_ld16u:
1878 tcg_out_qemu_ld(s, args, 1);
1879 break;
1880 case INDEX_op_qemu_ld16s:
1881 tcg_out_qemu_ld(s, args, 1 | 4);
1882 break;
1883 #if TCG_TARGET_REG_BITS == 64
1884 case INDEX_op_qemu_ld32u:
1885 #endif
1886 case INDEX_op_qemu_ld32:
1887 tcg_out_qemu_ld(s, args, 2);
1888 break;
1889 case INDEX_op_qemu_ld64:
1890 tcg_out_qemu_ld(s, args, 3);
1891 break;
1892
1893 case INDEX_op_qemu_st8:
1894 tcg_out_qemu_st(s, args, 0);
1895 break;
1896 case INDEX_op_qemu_st16:
1897 tcg_out_qemu_st(s, args, 1);
1898 break;
1899 case INDEX_op_qemu_st32:
1900 tcg_out_qemu_st(s, args, 2);
1901 break;
1902 case INDEX_op_qemu_st64:
1903 tcg_out_qemu_st(s, args, 3);
1904 break;
1905
1906 #if TCG_TARGET_REG_BITS == 32
1907 case INDEX_op_brcond2_i32:
1908 tcg_out_brcond2(s, args, const_args, 0);
1909 break;
1910 case INDEX_op_setcond2_i32:
1911 tcg_out_setcond2(s, args, const_args);
1912 break;
1913 case INDEX_op_mulu2_i32:
1914 tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
1915 break;
1916 case INDEX_op_add2_i32:
1917 if (const_args[4]) {
1918 tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
1919 } else {
1920 tgen_arithr(s, ARITH_ADD, args[0], args[4]);
1921 }
1922 if (const_args[5]) {
1923 tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
1924 } else {
1925 tgen_arithr(s, ARITH_ADC, args[1], args[5]);
1926 }
1927 break;
1928 case INDEX_op_sub2_i32:
1929 if (const_args[4]) {
1930 tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
1931 } else {
1932 tgen_arithr(s, ARITH_SUB, args[0], args[4]);
1933 }
1934 if (const_args[5]) {
1935 tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
1936 } else {
1937 tgen_arithr(s, ARITH_SBB, args[1], args[5]);
1938 }
1939 break;
1940 #else /* TCG_TARGET_REG_BITS == 64 */
1941 case INDEX_op_movi_i64:
1942 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1943 break;
1944 case INDEX_op_ld32s_i64:
1945 tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
1946 break;
1947 case INDEX_op_ld_i64:
1948 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1949 break;
1950 case INDEX_op_st_i64:
1951 if (const_args[0]) {
1952 tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW,
1953 0, args[1], args[2]);
1954 tcg_out32(s, args[0]);
1955 } else {
1956 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1957 }
1958 break;
1959 case INDEX_op_qemu_ld32s:
1960 tcg_out_qemu_ld(s, args, 2 | 4);
1961 break;
1962
1963 case INDEX_op_brcond_i64:
1964 tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
1965 args[3], 0);
1966 break;
1967 case INDEX_op_setcond_i64:
1968 tcg_out_setcond64(s, args[3], args[0], args[1],
1969 args[2], const_args[2]);
1970 break;
1971 case INDEX_op_movcond_i64:
1972 tcg_out_movcond64(s, args[5], args[0], args[1],
1973 args[2], const_args[2], args[3]);
1974 break;
1975
1976 case INDEX_op_bswap64_i64:
1977 tcg_out_bswap64(s, args[0]);
1978 break;
1979 case INDEX_op_ext32u_i64:
1980 tcg_out_ext32u(s, args[0], args[1]);
1981 break;
1982 case INDEX_op_ext32s_i64:
1983 tcg_out_ext32s(s, args[0], args[1]);
1984 break;
1985 #endif
1986
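/* Note (added): the deposit case only handles field positions that map
   directly onto x86 sub-registers: an 8-bit field at bit 0 is a byte
   move into the low byte; an 8-bit field at bit 8 uses the legacy
   high-byte register (args[0] + 4 encodes %ah/%ch/%dh/%bh, which is why
   the "Q" constraint restricts operands to eax/ebx/ecx/edx and why no
   REX prefix can be used there); a 16-bit field at bit 0 is a word move
   via the 0x66 operand-size prefix (P_DATA16).  Anything else aborts. */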
1987 OP_32_64(deposit):
1988 if (args[3] == 0 && args[4] == 8) {
1989 /* load bits 0..7 */
1990 tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM,
1991 args[2], args[0]);
1992 } else if (args[3] == 8 && args[4] == 8) {
1993 /* load bits 8..15 */
1994 tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4);
1995 } else if (args[3] == 0 && args[4] == 16) {
1996 /* load bits 0..15 */
1997 tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]);
1998 } else {
1999 tcg_abort();
2000 }
2001 break;
2002
2003 default:
2004 tcg_abort();
2005 }
2006
2007 #undef OP_32_64
2008 }
2009
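/* Informal summary (added) of the constraint letters used in the table
   below; see target_parse_constraint for the authoritative definitions:
   "r" any register; "q" a byte-addressable register; "Q" a register
   whose second byte is addressable (eax/ebx/ecx/edx); "a"/"c"/"d" the
   fixed registers eax/ecx/edx; "0"/"1" an input that must overlap output
   operand 0/1; "i" any immediate; and, on 64-bit hosts, "e"/"Z" an
   immediate representable as a sign-/zero-extended 32-bit value. */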
2010 static const TCGTargetOpDef x86_op_defs[] = {
2011 { INDEX_op_exit_tb, { } },
2012 { INDEX_op_goto_tb, { } },
2013 { INDEX_op_call, { "ri" } },
2014 { INDEX_op_br, { } },
2015 { INDEX_op_mov_i32, { "r", "r" } },
2016 { INDEX_op_movi_i32, { "r" } },
2017 { INDEX_op_ld8u_i32, { "r", "r" } },
2018 { INDEX_op_ld8s_i32, { "r", "r" } },
2019 { INDEX_op_ld16u_i32, { "r", "r" } },
2020 { INDEX_op_ld16s_i32, { "r", "r" } },
2021 { INDEX_op_ld_i32, { "r", "r" } },
2022 { INDEX_op_st8_i32, { "qi", "r" } },
2023 { INDEX_op_st16_i32, { "ri", "r" } },
2024 { INDEX_op_st_i32, { "ri", "r" } },
2025
2026 { INDEX_op_add_i32, { "r", "r", "ri" } },
2027 { INDEX_op_sub_i32, { "r", "0", "ri" } },
2028 { INDEX_op_mul_i32, { "r", "0", "ri" } },
2029 { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
2030 { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
2031 { INDEX_op_and_i32, { "r", "0", "ri" } },
2032 { INDEX_op_or_i32, { "r", "0", "ri" } },
2033 { INDEX_op_xor_i32, { "r", "0", "ri" } },
2034
2035 { INDEX_op_shl_i32, { "r", "0", "ci" } },
2036 { INDEX_op_shr_i32, { "r", "0", "ci" } },
2037 { INDEX_op_sar_i32, { "r", "0", "ci" } },
2038 { INDEX_op_rotl_i32, { "r", "0", "ci" } },
2039 { INDEX_op_rotr_i32, { "r", "0", "ci" } },
2040
2041 { INDEX_op_brcond_i32, { "r", "ri" } },
2042
2043 { INDEX_op_bswap16_i32, { "r", "0" } },
2044 { INDEX_op_bswap32_i32, { "r", "0" } },
2045
2046 { INDEX_op_neg_i32, { "r", "0" } },
2047
2048 { INDEX_op_not_i32, { "r", "0" } },
2049
2050 { INDEX_op_ext8s_i32, { "r", "q" } },
2051 { INDEX_op_ext16s_i32, { "r", "r" } },
2052 { INDEX_op_ext8u_i32, { "r", "q" } },
2053 { INDEX_op_ext16u_i32, { "r", "r" } },
2054
2055 { INDEX_op_setcond_i32, { "q", "r", "ri" } },
2056
2057 { INDEX_op_deposit_i32, { "Q", "0", "Q" } },
2058 #if TCG_TARGET_HAS_movcond_i32
2059 { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },
2060 #endif
2061
2062 #if TCG_TARGET_REG_BITS == 32
2063 { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
2064 { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
2065 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
2066 { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
2067 { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
2068 #else
2069 { INDEX_op_mov_i64, { "r", "r" } },
2070 { INDEX_op_movi_i64, { "r" } },
2071 { INDEX_op_ld8u_i64, { "r", "r" } },
2072 { INDEX_op_ld8s_i64, { "r", "r" } },
2073 { INDEX_op_ld16u_i64, { "r", "r" } },
2074 { INDEX_op_ld16s_i64, { "r", "r" } },
2075 { INDEX_op_ld32u_i64, { "r", "r" } },
2076 { INDEX_op_ld32s_i64, { "r", "r" } },
2077 { INDEX_op_ld_i64, { "r", "r" } },
2078 { INDEX_op_st8_i64, { "ri", "r" } },
2079 { INDEX_op_st16_i64, { "ri", "r" } },
2080 { INDEX_op_st32_i64, { "ri", "r" } },
2081 { INDEX_op_st_i64, { "re", "r" } },
2082
2083 { INDEX_op_add_i64, { "r", "0", "re" } },
2084 { INDEX_op_mul_i64, { "r", "0", "re" } },
2085 { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
2086 { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
2087 { INDEX_op_sub_i64, { "r", "0", "re" } },
2088 { INDEX_op_and_i64, { "r", "0", "reZ" } },
2089 { INDEX_op_or_i64, { "r", "0", "re" } },
2090 { INDEX_op_xor_i64, { "r", "0", "re" } },
2091
2092 { INDEX_op_shl_i64, { "r", "0", "ci" } },
2093 { INDEX_op_shr_i64, { "r", "0", "ci" } },
2094 { INDEX_op_sar_i64, { "r", "0", "ci" } },
2095 { INDEX_op_rotl_i64, { "r", "0", "ci" } },
2096 { INDEX_op_rotr_i64, { "r", "0", "ci" } },
2097
2098 { INDEX_op_brcond_i64, { "r", "re" } },
2099 { INDEX_op_setcond_i64, { "r", "r", "re" } },
2100
2101 { INDEX_op_bswap16_i64, { "r", "0" } },
2102 { INDEX_op_bswap32_i64, { "r", "0" } },
2103 { INDEX_op_bswap64_i64, { "r", "0" } },
2104 { INDEX_op_neg_i64, { "r", "0" } },
2105 { INDEX_op_not_i64, { "r", "0" } },
2106
2107 { INDEX_op_ext8s_i64, { "r", "r" } },
2108 { INDEX_op_ext16s_i64, { "r", "r" } },
2109 { INDEX_op_ext32s_i64, { "r", "r" } },
2110 { INDEX_op_ext8u_i64, { "r", "r" } },
2111 { INDEX_op_ext16u_i64, { "r", "r" } },
2112 { INDEX_op_ext32u_i64, { "r", "r" } },
2113
2114 { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
2115 { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
2116 #endif
2117
2118 #if TCG_TARGET_REG_BITS == 64
2119 { INDEX_op_qemu_ld8u, { "r", "L" } },
2120 { INDEX_op_qemu_ld8s, { "r", "L" } },
2121 { INDEX_op_qemu_ld16u, { "r", "L" } },
2122 { INDEX_op_qemu_ld16s, { "r", "L" } },
2123 { INDEX_op_qemu_ld32, { "r", "L" } },
2124 { INDEX_op_qemu_ld32u, { "r", "L" } },
2125 { INDEX_op_qemu_ld32s, { "r", "L" } },
2126 { INDEX_op_qemu_ld64, { "r", "L" } },
2127
2128 { INDEX_op_qemu_st8, { "L", "L" } },
2129 { INDEX_op_qemu_st16, { "L", "L" } },
2130 { INDEX_op_qemu_st32, { "L", "L" } },
2131 { INDEX_op_qemu_st64, { "L", "L" } },
2132 #elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
2133 { INDEX_op_qemu_ld8u, { "r", "L" } },
2134 { INDEX_op_qemu_ld8s, { "r", "L" } },
2135 { INDEX_op_qemu_ld16u, { "r", "L" } },
2136 { INDEX_op_qemu_ld16s, { "r", "L" } },
2137 { INDEX_op_qemu_ld32, { "r", "L" } },
2138 { INDEX_op_qemu_ld64, { "r", "r", "L" } },
2139
2140 { INDEX_op_qemu_st8, { "cb", "L" } },
2141 { INDEX_op_qemu_st16, { "L", "L" } },
2142 { INDEX_op_qemu_st32, { "L", "L" } },
2143 { INDEX_op_qemu_st64, { "L", "L", "L" } },
2144 #else
2145 { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
2146 { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
2147 { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
2148 { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
2149 { INDEX_op_qemu_ld32, { "r", "L", "L" } },
2150 { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
2151
2152 { INDEX_op_qemu_st8, { "cb", "L", "L" } },
2153 { INDEX_op_qemu_st16, { "L", "L", "L" } },
2154 { INDEX_op_qemu_st32, { "L", "L", "L" } },
2155 { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
2156 #endif
2157 { -1 },
2158 };
2159
2160 static int tcg_target_callee_save_regs[] = {
2161 #if TCG_TARGET_REG_BITS == 64
2162 TCG_REG_RBP,
2163 TCG_REG_RBX,
2164 #if defined(_WIN64)
2165 TCG_REG_RDI,
2166 TCG_REG_RSI,
2167 #endif
2168 TCG_REG_R12,
2169 TCG_REG_R13,
2170 TCG_REG_R14, /* Currently used for the global env. */
2171 TCG_REG_R15,
2172 #else
2173 TCG_REG_EBP, /* Currently used for the global env. */
2174 TCG_REG_EBX,
2175 TCG_REG_ESI,
2176 TCG_REG_EDI,
2177 #endif
2178 };
2179
2180 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
2181 and tcg_register_jit. */
2182
2183 #define PUSH_SIZE \
2184 ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
2185 * (TCG_TARGET_REG_BITS / 8))
2186
2187 #define FRAME_SIZE \
2188 ((PUSH_SIZE \
2189 + TCG_STATIC_CALL_ARGS_SIZE \
2190 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
2191 + TCG_TARGET_STACK_ALIGN - 1) \
2192 & ~(TCG_TARGET_STACK_ALIGN - 1))
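/* Illustrative sizing only (added), assuming the usual values
   TCG_STATIC_CALL_ARGS_SIZE = 128, CPU_TEMP_BUF_NLONGS = 128 and
   TCG_TARGET_STACK_ALIGN = 16: on a SysV x86-64 host six callee-saved
   registers are pushed, so PUSH_SIZE = (1 + 6) * 8 = 56 (the extra slot
   accounts for the return address already on the stack on entry), and
   FRAME_SIZE rounds 56 + 128 + 128 * 8 = 1208 up to 1216. */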
2193
2194 /* Generate global QEMU prologue and epilogue code */
2195 static void tcg_target_qemu_prologue(TCGContext *s)
2196 {
2197 int i, stack_addend;
2198
2199 /* TB prologue */
2200
2201 /* Reserve some stack space, also for TCG temps. */
2202 stack_addend = FRAME_SIZE - PUSH_SIZE;
2203 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
2204 CPU_TEMP_BUF_NLONGS * sizeof(long));
2205
2206 /* Save all callee saved registers. */
2207 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2208 tcg_out_push(s, tcg_target_callee_save_regs[i]);
2209 }
2210
2211 #if TCG_TARGET_REG_BITS == 32
2212 tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
2213 (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
2214 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
2215 /* jmp *tb. */
2216 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
2217 (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
2218 + stack_addend);
2219 #else
2220 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2221 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
2222 /* jmp *tb. */
2223 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
2224 #endif
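/* Sketch (added): on a SysV x86-64 host the prologue emitted above is
   roughly:
       push %rbp; push %rbx; push %r12; push %r13; push %r14; push %r15
       mov  %rdi,%r14              # env argument -> TCG_AREG0
       sub  $stack_addend,%rsp
       jmp  *%rsi                  # enter the translation block
   Illustrative only; the Win64 and 32-bit paths differ as coded above. */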
2225
2226 /* TB epilogue */
2227 tb_ret_addr = s->code_ptr;
2228
2229 tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);
2230
2231 for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
2232 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
2233 }
2234 tcg_out_opc(s, OPC_RET, 0, 0, 0);
2235
2236 #if !defined(CONFIG_SOFTMMU)
2237 /* Try to set up a segment register to point to GUEST_BASE. */
2238 if (GUEST_BASE) {
2239 setup_guest_base_seg();
2240 }
2241 #endif
2242 }
2243
2244 static void tcg_target_init(TCGContext *s)
2245 {
2246 #if !defined(CONFIG_USER_ONLY)
2247 /* Fail-safe: the TLB lookup code emitted by this backend assumes that sizeof(CPUTLBEntry) is exactly 1 << CPU_TLB_ENTRY_BITS. */
2248 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
2249 tcg_abort();
2250 #endif
2251
2252 if (TCG_TARGET_REG_BITS == 64) {
2253 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2254 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2255 } else {
2256 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
2257 }
2258
2259 tcg_regset_clear(tcg_target_call_clobber_regs);
2260 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
2261 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
2262 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
2263 if (TCG_TARGET_REG_BITS == 64) {
2264 #if !defined(_WIN64)
2265 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
2266 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
2267 #endif
2268 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
2269 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
2270 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
2271 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
2272 }
2273
2274 tcg_regset_clear(s->reserved_regs);
2275 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2276
2277 tcg_add_target_add_op_defs(x86_op_defs);
2278 }
2279
2280 typedef struct {
2281 uint32_t len __attribute__((aligned((sizeof(void *)))));
2282 uint32_t id;
2283 uint8_t version;
2284 char augmentation[1];
2285 uint8_t code_align;
2286 uint8_t data_align;
2287 uint8_t return_column;
2288 } DebugFrameCIE;
2289
2290 typedef struct {
2291 uint32_t len __attribute__((aligned((sizeof(void *)))));
2292 uint32_t cie_offset;
2293 tcg_target_long func_start __attribute__((packed));
2294 tcg_target_long func_len __attribute__((packed));
2295 uint8_t def_cfa[4];
2296 uint8_t reg_ofs[14];
2297 } DebugFrameFDE;
2298
2299 typedef struct {
2300 DebugFrameCIE cie;
2301 DebugFrameFDE fde;
2302 } DebugFrame;
2303
2304 #if !defined(__ELF__)
2305 /* Host machine without ELF. */
2306 #elif TCG_TARGET_REG_BITS == 64
2307 #define ELF_HOST_MACHINE EM_X86_64
2308 static DebugFrame debug_frame = {
2309 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2310 .cie.id = -1,
2311 .cie.version = 1,
2312 .cie.code_align = 1,
2313 .cie.data_align = 0x78, /* sleb128 -8 */
2314 .cie.return_column = 16,
2315
2316 .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
2317 .fde.def_cfa = {
2318 12, 7, /* DW_CFA_def_cfa %rsp, ... */
2319 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2320 (FRAME_SIZE >> 7)
2321 },
2322 .fde.reg_ofs = {
2323 0x90, 1, /* DW_CFA_offset, %rip, -8 */
2324 /* The following ordering must match tcg_target_callee_save_regs. */
2325 0x86, 2, /* DW_CFA_offset, %rbp, -16 */
2326 0x83, 3, /* DW_CFA_offset, %rbx, -24 */
2327 0x8c, 4, /* DW_CFA_offset, %r12, -32 */
2328 0x8d, 5, /* DW_CFA_offset, %r13, -40 */
2329 0x8e, 6, /* DW_CFA_offset, %r14, -48 */
2330 0x8f, 7, /* DW_CFA_offset, %r15, -56 */
2331 }
2332 };
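/* Worked decoding of the bytes above (added): in .fde.def_cfa, 12 is
   DW_CFA_def_cfa with register 7 (%rsp) and FRAME_SIZE as a two-byte
   uleb128 offset.  In .fde.reg_ofs, the pair 0x86, 2 is DW_CFA_offset
   (0x80) for register 6 (%rbp) with factored offset 2, i.e.
   2 * data_align (-8) = -16, matching the inline comment; 0x90, 1 is the
   return-address column (16) saved at -8. */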
2333 #else
2334 #define ELF_HOST_MACHINE EM_386
2335 static DebugFrame debug_frame = {
2336 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2337 .cie.id = -1,
2338 .cie.version = 1,
2339 .cie.code_align = 1,
2340 .cie.data_align = 0x7c, /* sleb128 -4 */
2341 .cie.return_column = 8,
2342
2343 .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
2344 .fde.def_cfa = {
2345 12, 4, /* DW_CFA_def_cfa %esp, ... */
2346 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2347 (FRAME_SIZE >> 7)
2348 },
2349 .fde.reg_ofs = {
2350 0x88, 1, /* DW_CFA_offset, %eip, -4 */
2351 /* The following ordering must match tcg_target_callee_save_regs. */
2352 0x85, 2, /* DW_CFA_offset, %ebp, -8 */
2353 0x83, 3, /* DW_CFA_offset, %ebx, -12 */
2354 0x86, 4, /* DW_CFA_offset, %esi, -16 */
2355 0x87, 5, /* DW_CFA_offset, %edi, -20 */
2356 }
2357 };
2358 #endif
2359
2360 #if defined(ELF_HOST_MACHINE)
2361 void tcg_register_jit(void *buf, size_t buf_size)
2362 {
2363 /* The FDE above hard-codes FRAME_SIZE as a 2-byte uleb128, so it must fit in 14 bits. */
2364 assert(FRAME_SIZE >> 14 == 0);
2365
2366 debug_frame.fde.func_start = (tcg_target_long) buf;
2367 debug_frame.fde.func_len = buf_size;
2368
2369 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2370 }
2371 #endif