/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-pool.inc.c"
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
    "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
#else
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
#endif
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7",
#if TCG_TARGET_REG_BITS == 64
    "%xmm8", "%xmm9", "%xmm10", "%xmm11",
    "%xmm12", "%xmm13", "%xmm14", "%xmm15",
#endif
};
#endif
static const int tcg_target_reg_alloc_order[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_RCX,
    TCG_REG_RDX,
    TCG_REG_RSI,
    TCG_REG_RDI,
    TCG_REG_RAX,
#else
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
#endif
    TCG_REG_XMM0,
    TCG_REG_XMM1,
    TCG_REG_XMM2,
    TCG_REG_XMM3,
    TCG_REG_XMM4,
    TCG_REG_XMM5,
#ifndef _WIN64
    /* The Win64 ABI has xmm6-xmm15 as caller-saves, and we do not save
       any of them.  Therefore only allow xmm0-xmm5 to be allocated.  */
    TCG_REG_XMM6,
    TCG_REG_XMM7,
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_XMM8,
    TCG_REG_XMM9,
    TCG_REG_XMM10,
    TCG_REG_XMM11,
    TCG_REG_XMM12,
    TCG_REG_XMM13,
    TCG_REG_XMM14,
    TCG_REG_XMM15,
#endif
#endif
};
static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
#if defined(_WIN64)
    TCG_REG_RCX,
    TCG_REG_RDX,
#else
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
#endif
    TCG_REG_R8,
    TCG_REG_R9,
#else
    /* 32 bit mode uses stack based calling convention (GCC default). */
#endif
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_EAX,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_EDX
#endif
};
/* Constants we accept.  */
#define TCG_CT_CONST_S32 0x100
#define TCG_CT_CONST_U32 0x200
#define TCG_CT_CONST_I32 0x400
#define TCG_CT_CONST_WSZ 0x800
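/*
 * How these are used by tcg_target_const_match() below: S32 accepts values
 * that sign-extend from 32 bits, U32 values that zero-extend from 32 bits,
 * I32 values whose bitwise complement fits in a signed 32-bit immediate
 * (so an AND with the inverted constant can be emitted), and WSZ accepts
 * exactly the operand word size (32 or 64) as a TZCNT/LZCNT input.
 */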
/* Registers used with L constraint, which are the first argument
   registers on x86_64, and two random call clobbered registers on
   i386. */
#if TCG_TARGET_REG_BITS == 64
# define TCG_REG_L0 tcg_target_call_iarg_regs[0]
# define TCG_REG_L1 tcg_target_call_iarg_regs[1]
#else
# define TCG_REG_L0 TCG_REG_EAX
# define TCG_REG_L1 TCG_REG_EDX
#endif
/* The host compiler should supply <cpuid.h> to enable runtime features
   detection, as we're not going to go so far as our own inline assembly.
   If not available, default values will be assumed.  */
#if defined(CONFIG_CPUID_H)
#include "qemu/cpuid.h"
#endif

/* For 64-bit, we always know that CMOV is available.  */
#if TCG_TARGET_REG_BITS == 64
# define have_cmov 1
#elif defined(CONFIG_CPUID_H)
static bool have_cmov;
#else
# define have_cmov 0
#endif

/* We need these symbols in tcg-target.h, and we can't properly conditionalize
   it there.  Therefore we always define the variable.  */
bool have_bmi1;
bool have_popcnt;
bool have_avx1;
bool have_avx2;

#ifdef CONFIG_CPUID_H
static bool have_movbe;
static bool have_bmi2;
static bool have_lzcnt;
#else
# define have_movbe 0
# define have_bmi2 0
# define have_lzcnt 0
#endif

static tcg_insn_unit *tb_ret_addr;
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    value += addend;

    switch(type) {
    case R_386_PC32:
        value -= (uintptr_t)code_ptr;
        if (value != (int32_t)value) {
            return false;
        }
        /* FALLTHRU */
    case R_386_32:
        tcg_patch32(code_ptr, value);
        break;
    case R_386_PC8:
        value -= (uintptr_t)code_ptr;
        if (value != (int8_t)value) {
            return false;
        }
        tcg_patch8(code_ptr, value);
        break;
    default:
        tcg_abort();
    }
    return true;
}
#if TCG_TARGET_REG_BITS == 64
#define ALL_GENERAL_REGS   0x0000ffffu
#define ALL_VECTOR_REGS    0xffff0000u
#else
#define ALL_GENERAL_REGS   0x000000ffu
#define ALL_VECTOR_REGS    0x00ff0000u
#endif
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch(*ct_str++) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        /* A register that can be used as a byte operand.  */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf;
        break;
    case 'Q':
        /* A register with an addressable second byte (e.g. %ah).  */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xf;
        break;
    case 'r':
        /* A general register.  */
        ct->ct |= TCG_CT_REG;
        ct->u.regs |= ALL_GENERAL_REGS;
        break;
    case 'W':
        /* With TZCNT/LZCNT, we can have operand-size as an input.  */
        ct->ct |= TCG_CT_CONST_WSZ;
        break;
    case 'x':
        /* A vector register.  */
        ct->ct |= TCG_CT_REG;
        ct->u.regs |= ALL_VECTOR_REGS;
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff;
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);
        break;

    case 'e':
        ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_S32);
        break;
    case 'Z':
        ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_U32);
        break;
    case 'I':
        ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_I32);
        break;

    default:
        return NULL;
    }
    return ct_str;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return 1;
    }
    return 0;
}
#if TCG_TARGET_REG_BITS == 64
# define LOWREGMASK(x)  ((x) & 7)
#else
# define LOWREGMASK(x)  (x)
#endif

#define P_EXT           0x100           /* 0x0f opcode prefix */
#define P_EXT38         0x200           /* 0x0f 0x38 opcode prefix */
#define P_DATA16        0x400           /* 0x66 opcode prefix */
#if TCG_TARGET_REG_BITS == 64
# define P_ADDR32       0x800           /* 0x67 opcode prefix */
# define P_REXW         0x1000          /* Set REX.W = 1 */
# define P_REXB_R       0x2000          /* REG field as byte register */
# define P_REXB_RM      0x4000          /* R/M field as byte register */
# define P_GS           0x8000          /* gs segment override */
#else
# define P_ADDR32       0
# define P_REXW         0
# define P_REXB_R       0
# define P_REXB_RM      0
# define P_GS           0
#endif
#define P_EXT3A         0x10000         /* 0x0f 0x3a opcode prefix */
#define P_SIMDF3        0x20000         /* 0xf3 opcode prefix */
#define P_SIMDF2        0x40000         /* 0xf2 opcode prefix */
#define P_VEXL          0x80000         /* Set VEX.L = 1 */
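/*
 * Each OPC_* value below packs the final one-byte opcode in bits 0-7 and
 * the P_* prefix flags above it.  For example, OPC_MOVZWL is (0xb7 | P_EXT),
 * which tcg_out_opc() emits as the two-byte escape sequence 0x0f 0xb7;
 * adding P_REXW on a 64-bit host also emits a REX.W prefix before the
 * escape byte.
 */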
#define OPC_ARITH_EvIz  (0x81)
#define OPC_ARITH_EvIb  (0x83)
#define OPC_ARITH_GvEv  (0x03)          /* ... plus (ARITH_FOO << 3) */
#define OPC_ANDN        (0xf2 | P_EXT38)
#define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BLENDPS     (0x0c | P_EXT3A | P_DATA16)
#define OPC_BSF         (0xbc | P_EXT)
#define OPC_BSR         (0xbd | P_EXT)
#define OPC_BSWAP       (0xc8 | P_EXT)
#define OPC_CALL_Jz     (0xe8)
#define OPC_CMOVCC      (0x40 | P_EXT)  /* ... plus condition code */
#define OPC_CMP_GvEv    (OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32     (0x48)
#define OPC_IMUL_GvEv   (0xaf | P_EXT)
#define OPC_IMUL_GvEvIb (0x6b)
#define OPC_IMUL_GvEvIz (0x69)
#define OPC_INC_r32     (0x40)
#define OPC_JCC_long    (0x80 | P_EXT)  /* ... plus condition code */
#define OPC_JCC_short   (0x70)          /* ... plus condition code */
#define OPC_JMP_long    (0xe9)
#define OPC_JMP_short   (0xeb)
#define OPC_LEA         (0x8d)
#define OPC_LZCNT       (0xbd | P_EXT | P_SIMDF3)
#define OPC_MOVB_EvGv   (0x88)          /* stores, more or less */
#define OPC_MOVL_EvGv   (0x89)          /* stores, more or less */
#define OPC_MOVL_GvEv   (0x8b)          /* loads, more or less */
#define OPC_MOVB_EvIz   (0xc6)
#define OPC_MOVL_EvIz   (0xc7)
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVBE_GyMy  (0xf0 | P_EXT38)
#define OPC_MOVBE_MyGy  (0xf1 | P_EXT38)
#define OPC_MOVD_VyEy   (0x6e | P_EXT | P_DATA16)
#define OPC_MOVD_EyVy   (0x7e | P_EXT | P_DATA16)
#define OPC_MOVDDUP     (0x12 | P_EXT | P_SIMDF2)
#define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16)
#define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16)
#define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3)
#define OPC_MOVDQU_WxVx (0x7f | P_EXT | P_SIMDF3)
#define OPC_MOVQ_VqWq   (0x7e | P_EXT | P_SIMDF3)
#define OPC_MOVQ_WqVq   (0xd6 | P_EXT | P_DATA16)
#define OPC_MOVSBL      (0xbe | P_EXT)
#define OPC_MOVSWL      (0xbf | P_EXT)
#define OPC_MOVSLQ      (0x63 | P_REXW)
#define OPC_MOVZBL      (0xb6 | P_EXT)
#define OPC_MOVZWL      (0xb7 | P_EXT)
#define OPC_PACKSSDW    (0x6b | P_EXT | P_DATA16)
#define OPC_PACKSSWB    (0x63 | P_EXT | P_DATA16)
#define OPC_PACKUSDW    (0x2b | P_EXT38 | P_DATA16)
#define OPC_PACKUSWB    (0x67 | P_EXT | P_DATA16)
#define OPC_PADDB       (0xfc | P_EXT | P_DATA16)
#define OPC_PADDW       (0xfd | P_EXT | P_DATA16)
#define OPC_PADDD       (0xfe | P_EXT | P_DATA16)
#define OPC_PADDQ       (0xd4 | P_EXT | P_DATA16)
#define OPC_PAND        (0xdb | P_EXT | P_DATA16)
#define OPC_PANDN       (0xdf | P_EXT | P_DATA16)
#define OPC_PBLENDW     (0x0e | P_EXT3A | P_DATA16)
#define OPC_PCMPEQB     (0x74 | P_EXT | P_DATA16)
#define OPC_PCMPEQW     (0x75 | P_EXT | P_DATA16)
#define OPC_PCMPEQD     (0x76 | P_EXT | P_DATA16)
#define OPC_PCMPEQQ     (0x29 | P_EXT38 | P_DATA16)
#define OPC_PCMPGTB     (0x64 | P_EXT | P_DATA16)
#define OPC_PCMPGTW     (0x65 | P_EXT | P_DATA16)
#define OPC_PCMPGTD     (0x66 | P_EXT | P_DATA16)
#define OPC_PCMPGTQ     (0x37 | P_EXT38 | P_DATA16)
#define OPC_PMOVSXBW    (0x20 | P_EXT38 | P_DATA16)
#define OPC_PMOVSXWD    (0x23 | P_EXT38 | P_DATA16)
#define OPC_PMOVSXDQ    (0x25 | P_EXT38 | P_DATA16)
#define OPC_PMOVZXBW    (0x30 | P_EXT38 | P_DATA16)
#define OPC_PMOVZXWD    (0x33 | P_EXT38 | P_DATA16)
#define OPC_PMOVZXDQ    (0x35 | P_EXT38 | P_DATA16)
#define OPC_PMULLW      (0xd5 | P_EXT | P_DATA16)
#define OPC_PMULLD      (0x40 | P_EXT38 | P_DATA16)
#define OPC_POR         (0xeb | P_EXT | P_DATA16)
#define OPC_PSHUFB      (0x00 | P_EXT38 | P_DATA16)
#define OPC_PSHUFD      (0x70 | P_EXT | P_DATA16)
#define OPC_PSHUFLW     (0x70 | P_EXT | P_SIMDF2)
#define OPC_PSHUFHW     (0x70 | P_EXT | P_SIMDF3)
#define OPC_PSHIFTW_Ib  (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSHIFTD_Ib  (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSHIFTQ_Ib  (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSUBB       (0xf8 | P_EXT | P_DATA16)
#define OPC_PSUBW       (0xf9 | P_EXT | P_DATA16)
#define OPC_PSUBD       (0xfa | P_EXT | P_DATA16)
#define OPC_PSUBQ       (0xfb | P_EXT | P_DATA16)
#define OPC_PUNPCKLBW   (0x60 | P_EXT | P_DATA16)
#define OPC_PUNPCKLWD   (0x61 | P_EXT | P_DATA16)
#define OPC_PUNPCKLDQ   (0x62 | P_EXT | P_DATA16)
#define OPC_PUNPCKLQDQ  (0x6c | P_EXT | P_DATA16)
#define OPC_PUNPCKHBW   (0x68 | P_EXT | P_DATA16)
#define OPC_PUNPCKHWD   (0x69 | P_EXT | P_DATA16)
#define OPC_PUNPCKHDQ   (0x6a | P_EXT | P_DATA16)
#define OPC_PUNPCKHQDQ  (0x6d | P_EXT | P_DATA16)
#define OPC_PXOR        (0xef | P_EXT | P_DATA16)
#define OPC_POP_r32     (0x58)
#define OPC_POPCNT      (0xb8 | P_EXT | P_SIMDF3)
#define OPC_PUSH_r32    (0x50)
#define OPC_PUSH_Iv     (0x68)
#define OPC_PUSH_Ib     (0x6a)
#define OPC_RET         (0xc3)
#define OPC_SETCC       (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
#define OPC_SHIFT_1     (0xd1)
#define OPC_SHIFT_Ib    (0xc1)
#define OPC_SHIFT_cl    (0xd3)
#define OPC_SARX        (0xf7 | P_EXT38 | P_SIMDF3)
#define OPC_SHUFPS      (0xc6 | P_EXT)
#define OPC_SHLX        (0xf7 | P_EXT38 | P_DATA16)
#define OPC_SHRX        (0xf7 | P_EXT38 | P_SIMDF2)
#define OPC_TESTL       (0x85)
#define OPC_TZCNT       (0xbc | P_EXT | P_SIMDF3)
#define OPC_UD2         (0x0b | P_EXT)
#define OPC_VPBLENDD    (0x02 | P_EXT3A | P_DATA16)
#define OPC_VPBLENDVB   (0x4c | P_EXT3A | P_DATA16)
#define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
#define OPC_VPERMQ      (0x00 | P_EXT3A | P_DATA16 | P_REXW)
#define OPC_VPERM2I128  (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
#define OPC_VZEROUPPER  (0x77 | P_EXT)
#define OPC_XCHG_ax_r32 (0x90)

#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)
#define OPC_GRP14       (0x73 | P_EXT | P_DATA16)
/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_INC_Ev    0
#define EXT5_DEC_Ev    1
#define EXT5_CALLN_Ev  2
#define EXT5_JMPN_Ev   4
/* Condition codes to be added to OPC_JCC_{long,short}.  */

static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;

    if (opc & P_GS) {
        tcg_out8(s, 0x65);
    }
    if (opc & P_DATA16) {
        /* We should never be asking for both 16 and 64-bit operation.  */
        tcg_debug_assert((opc & P_REXW) == 0);
        tcg_out8(s, 0x66);
    }
    if (opc & P_ADDR32) {
        tcg_out8(s, 0x67);
    }
    if (opc & P_SIMDF3) {
        tcg_out8(s, 0xf3);
    } else if (opc & P_SIMDF2) {
        tcg_out8(s, 0xf2);
    }

    rex = 0;
    rex |= (opc & P_REXW) ? 0x8 : 0x0;  /* REX.W */
    rex |= (r & 8) >> 1;                /* REX.R */
    rex |= (x & 8) >> 2;                /* REX.X */
    rex |= (rm & 8) >> 3;               /* REX.B */

    /* P_REXB_{R,RM} indicates that the given register is the low byte.
       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
       as otherwise the encoding indicates %[abcd]h.  Note that the values
       that are ORed in merely indicate that the REX byte must be present;
       those bits get discarded in output.  */
    rex |= opc & (r >= 4 ? P_REXB_R : 0);
    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);

    if (rex) {
        tcg_out8(s, (uint8_t)(rex | 0x40));
    }

    if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        } else if (opc & P_EXT3A) {
            tcg_out8(s, 0x3a);
        }
    }

    tcg_out8(s, opc);
}
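/*
 * The REX prefix assembled above has the layout 0100WRXB: bit 3 (W) selects
 * 64-bit operand size, bit 2 (R) extends the ModRM reg field, bit 1 (X)
 * extends the SIB index field, and bit 0 (B) extends the ModRM r/m or SIB
 * base field.  For instance, "addq %r8, %rax" needs REX.W|REX.R = 0x4c.
 */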
#else
static void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_DATA16) {
        tcg_out8(s, 0x66);
    }
    if (opc & P_SIMDF3) {
        tcg_out8(s, 0xf3);
    } else if (opc & P_SIMDF2) {
        tcg_out8(s, 0xf2);
    }
    if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        } else if (opc & P_EXT3A) {
            tcg_out8(s, 0x3a);
        }
    }
    tcg_out8(s, opc);
}
/* Discard the register arguments to tcg_out_opc early, so as not to penalize
   the 32-bit compilation paths.  This method works with all versions of gcc,
   whereas relying on optimization may not be able to exclude them.  */
#define tcg_out_opc(s, opc, r, rm, x)  (tcg_out_opc)(s, opc)
#endif
static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
                            int rm, int index)
{
    int tmp;

    /* Use the two byte form if possible, which cannot encode
       VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT.  */
    if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT
        && ((rm | index) & 8) == 0) {
        /* Two byte VEX prefix.  */
        tcg_out8(s, 0xc5);

        tmp = (r & 8 ? 0 : 0x80);              /* VEX.R */
    } else {
        /* Three byte VEX prefix.  */
        tcg_out8(s, 0xc4);

        /* VEX.m-mmmm */
        if (opc & P_EXT3A) {
            tmp = 3;
        } else if (opc & P_EXT38) {
            tmp = 2;
        } else if (opc & P_EXT) {
            tmp = 1;
        } else {
            g_assert_not_reached();
        }
        tmp |= (r & 8 ? 0 : 0x80);             /* VEX.R */
        tmp |= (index & 8 ? 0 : 0x40);         /* VEX.X */
        tmp |= (rm & 8 ? 0 : 0x20);            /* VEX.B */
        tcg_out8(s, tmp);

        tmp = (opc & P_REXW ? 0x80 : 0);       /* VEX.W */
    }

    tmp |= (opc & P_VEXL ? 0x04 : 0);          /* VEX.L */
    /* VEX.pp */
    if (opc & P_DATA16) {
        tmp |= 1;                              /* 0x66 */
    } else if (opc & P_SIMDF3) {
        tmp |= 2;                              /* 0xf3 */
    } else if (opc & P_SIMDF2) {
        tmp |= 3;                              /* 0xf2 */
    }
    tmp |= (~v & 15) << 3;                     /* VEX.vvvv */
    tcg_out8(s, tmp);
    tcg_out8(s, opc);
}

static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
{
    tcg_out_vex_opc(s, opc, r, v, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
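/*
 * VEX prefix layout emitted above: the two-byte form is 0xc5 followed by
 * [R vvvv L pp]; the three-byte form is 0xc4, [R X B m-mmmm], [W vvvv L pp].
 * R/X/B are the inverted REX extension bits, m-mmmm selects the 0x0f,
 * 0x0f 0x38 or 0x0f 0x3a opcode map, vvvv is the inverted second source
 * register, and pp stands in for the 0x66/0xf3/0xf2 prefixes.
 */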
/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   We handle either RM and INDEX missing with a negative value.  In 64-bit
   mode for absolute addresses, ~RM is the size of the immediate operand
   that will follow the instruction.  */

static void tcg_out_sib_offset(TCGContext *s, int r, int rm, int index,
                               int shift, intptr_t offset)
{
    int mod, len;

    if (index < 0 && rm < 0) {
        if (TCG_TARGET_REG_BITS == 64) {
            /* Try for a rip-relative addressing mode.  This has replaced
               the 32-bit-mode absolute addressing encoding.  */
            intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
            intptr_t disp = offset - pc;
            if (disp == (int32_t)disp) {
                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
                tcg_out32(s, disp);
                return;
            }

            /* Try for an absolute address encoding.  This requires the
               use of the MODRM+SIB encoding and is therefore larger than
               rip-relative addressing.  */
            if (offset == (int32_t)offset) {
                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
                tcg_out8(s, (4 << 3) | 5);
                tcg_out32(s, offset);
                return;
            }

            /* ??? The memory isn't directly addressable.  */
            g_assert_not_reached();
        } else {
            /* Absolute address.  */
            tcg_out8(s, (r << 3) | 5);
            tcg_out32(s, offset);
            return;
        }
    }

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm < 0) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  In 64-bit mode, the REX.X
           bit counts, so %r12 can be used as the index.  */
        if (index < 0) {
            index = 4;
        } else {
            tcg_debug_assert(index != TCG_REG_ESP);
        }

        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
        tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
    }

    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}
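/*
 * Recap of the bytes built above: ModRM is mod(2) | reg(3) | r/m(3), and the
 * optional SIB byte is scale(2) | index(3) | base(3).  mod selects the size
 * of the displacement (0, 1 or 4 bytes), r/m = 4 escapes to the SIB byte,
 * and mod = 0 with r/m or base = 5 selects a bare disp32 (rip-relative in
 * 64-bit mode), which is why (%ebp) always needs an explicit displacement.
 */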
static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift, intptr_t offset)
{
    tcg_out_opc(s, opc, r, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
    tcg_out_sib_offset(s, r, rm, index, shift, offset);
}
static void tcg_out_vex_modrm_sib_offset(TCGContext *s, int opc, int r, int v,
                                         int rm, int index, int shift,
                                         intptr_t offset)
{
    tcg_out_vex_opc(s, opc, r, v, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
    tcg_out_sib_offset(s, r, rm, index, shift, offset);
}
/* A simplification of the above with no index or shift.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
                                        int rm, intptr_t offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}

static inline void tcg_out_vex_modrm_offset(TCGContext *s, int opc, int r,
                                            int v, int rm, intptr_t offset)
{
    tcg_out_vex_modrm_sib_offset(s, opc, r, v, rm, -1, 0, offset);
}
/* Output an opcode with an expected reference to the constant pool.  */
static inline void tcg_out_modrm_pool(TCGContext *s, int opc, int r)
{
    tcg_out_opc(s, opc, r, 0, 0);
    /* Absolute for 32-bit, pc-relative for 64-bit.  */
    tcg_out8(s, LOWREGMASK(r) << 3 | 5);
}

/* Output an opcode with an expected reference to the constant pool.  */
static inline void tcg_out_vex_modrm_pool(TCGContext *s, int opc, int r)
{
    tcg_out_vex_opc(s, opc, r, 0, 0, 0);
    /* Absolute for 32-bit, pc-relative for 64-bit.  */
    tcg_out8(s, LOWREGMASK(r) << 3 | 5);
}
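/*
 * In both helpers above, the ModRM byte with mod = 0 and r/m = 5 selects a
 * 32-bit displacement with no base register (rip-relative on x86-64), so the
 * four displacement bytes that follow are what the constant-pool relocation,
 * registered by the callers via new_pool_label(), later fills in.
 */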
/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* Propagate an opcode prefix, such as P_REXW.  */
    int ext = subop & ~0x7;
    subop &= 0x7;

    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}
795 static void tcg_out_mov(TCGContext
*s
, TCGType type
, TCGReg ret
, TCGReg arg
)
809 tcg_out_modrm(s
, OPC_MOVL_GvEv
+ rexw
, ret
, arg
);
811 tcg_out_vex_modrm(s
, OPC_MOVD_EyVy
+ rexw
, arg
, 0, ret
);
815 tcg_out_vex_modrm(s
, OPC_MOVD_VyEy
+ rexw
, ret
, 0, arg
);
817 tcg_out_vex_modrm(s
, OPC_MOVQ_VqWq
, ret
, 0, arg
);
823 tcg_debug_assert(ret
>= 16 && arg
>= 16);
824 tcg_out_vex_modrm(s
, OPC_MOVQ_VqWq
, ret
, 0, arg
);
827 tcg_debug_assert(ret
>= 16 && arg
>= 16);
828 tcg_out_vex_modrm(s
, OPC_MOVDQA_VxWx
, ret
, 0, arg
);
831 tcg_debug_assert(ret
>= 16 && arg
>= 16);
832 tcg_out_vex_modrm(s
, OPC_MOVDQA_VxWx
| P_VEXL
, ret
, 0, arg
);
836 g_assert_not_reached();
840 static void tcg_out_dup_vec(TCGContext
*s
, TCGType type
, unsigned vece
,
844 static const int dup_insn
[4] = {
845 OPC_VPBROADCASTB
, OPC_VPBROADCASTW
,
846 OPC_VPBROADCASTD
, OPC_VPBROADCASTQ
,
848 int vex_l
= (type
== TCG_TYPE_V256
? P_VEXL
: 0);
849 tcg_out_vex_modrm(s
, dup_insn
[vece
] + vex_l
, r
, 0, a
);
853 /* ??? With zero in a register, use PSHUFB. */
854 tcg_out_vex_modrm(s
, OPC_PUNPCKLBW
, r
, a
, a
);
858 tcg_out_vex_modrm(s
, OPC_PUNPCKLWD
, r
, a
, a
);
862 tcg_out_vex_modrm(s
, OPC_PSHUFD
, r
, 0, a
);
863 /* imm8 operand: all output lanes selected from input lane 0. */
867 tcg_out_vex_modrm(s
, OPC_PUNPCKLQDQ
, r
, a
, a
);
870 g_assert_not_reached();
875 static void tcg_out_dupi_vec(TCGContext
*s
, TCGType type
,
876 TCGReg ret
, tcg_target_long arg
)
878 int vex_l
= (type
== TCG_TYPE_V256
? P_VEXL
: 0);
881 tcg_out_vex_modrm(s
, OPC_PXOR
, ret
, ret
, ret
);
885 tcg_out_vex_modrm(s
, OPC_PCMPEQB
+ vex_l
, ret
, ret
, ret
);
889 if (TCG_TARGET_REG_BITS
== 64) {
890 if (type
== TCG_TYPE_V64
) {
891 tcg_out_vex_modrm_pool(s
, OPC_MOVQ_VqWq
, ret
);
892 } else if (have_avx2
) {
893 tcg_out_vex_modrm_pool(s
, OPC_VPBROADCASTQ
+ vex_l
, ret
);
895 tcg_out_vex_modrm_pool(s
, OPC_MOVDDUP
, ret
);
897 new_pool_label(s
, arg
, R_386_PC32
, s
->code_ptr
- 4, -4);
898 } else if (have_avx2
) {
899 tcg_out_vex_modrm_pool(s
, OPC_VPBROADCASTD
+ vex_l
, ret
);
900 new_pool_label(s
, arg
, R_386_32
, s
->code_ptr
- 4, 0);
902 tcg_out_vex_modrm_pool(s
, OPC_MOVD_VyEy
, ret
);
903 new_pool_label(s
, arg
, R_386_32
, s
->code_ptr
- 4, 0);
904 tcg_out_dup_vec(s
, type
, MO_32
, ret
, ret
);
908 static void tcg_out_movi(TCGContext
*s
, TCGType type
,
909 TCGReg ret
, tcg_target_long arg
)
911 tcg_target_long diff
;
915 #if TCG_TARGET_REG_BITS == 64
925 tcg_debug_assert(ret
>= 16);
926 tcg_out_dupi_vec(s
, type
, ret
, arg
);
929 g_assert_not_reached();
933 tgen_arithr(s
, ARITH_XOR
, ret
, ret
);
936 if (arg
== (uint32_t)arg
|| type
== TCG_TYPE_I32
) {
937 tcg_out_opc(s
, OPC_MOVL_Iv
+ LOWREGMASK(ret
), 0, ret
, 0);
941 if (arg
== (int32_t)arg
) {
942 tcg_out_modrm(s
, OPC_MOVL_EvIz
+ P_REXW
, 0, ret
);
947 /* Try a 7 byte pc-relative lea before the 10 byte movq. */
948 diff
= arg
- ((uintptr_t)s
->code_ptr
+ 7);
949 if (diff
== (int32_t)diff
) {
950 tcg_out_opc(s
, OPC_LEA
| P_REXW
, ret
, 0, 0);
951 tcg_out8(s
, (LOWREGMASK(ret
) << 3) | 5);
956 tcg_out_opc(s
, OPC_MOVL_Iv
+ P_REXW
+ LOWREGMASK(ret
), 0, ret
, 0);
960 static inline void tcg_out_pushi(TCGContext
*s
, tcg_target_long val
)
962 if (val
== (int8_t)val
) {
963 tcg_out_opc(s
, OPC_PUSH_Ib
, 0, 0, 0);
965 } else if (val
== (int32_t)val
) {
966 tcg_out_opc(s
, OPC_PUSH_Iv
, 0, 0, 0);
static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Given the strength of x86 memory ordering, we only need care for
       store-load ordering.  Experimentally, "lock orl $0,0(%esp)" is
       faster than "mfence", so don't bother with the sse insn.  */
    if (a0 & TCG_MO_ST_LD) {
        tcg_out8(s, 0xf0);
        tcg_out_modrm_offset(s, OPC_ARITH_EvIb, ARITH_OR, TCG_REG_ESP, 0);
        tcg_out8(s, 0);
    }
}

static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}
995 static void tcg_out_ld(TCGContext
*s
, TCGType type
, TCGReg ret
,
996 TCGReg arg1
, intptr_t arg2
)
1001 tcg_out_modrm_offset(s
, OPC_MOVL_GvEv
, ret
, arg1
, arg2
);
1003 tcg_out_vex_modrm_offset(s
, OPC_MOVD_VyEy
, ret
, 0, arg1
, arg2
);
1008 tcg_out_modrm_offset(s
, OPC_MOVL_GvEv
| P_REXW
, ret
, arg1
, arg2
);
1013 tcg_debug_assert(ret
>= 16);
1014 tcg_out_vex_modrm_offset(s
, OPC_MOVQ_VqWq
, ret
, 0, arg1
, arg2
);
1017 tcg_debug_assert(ret
>= 16);
1018 tcg_out_vex_modrm_offset(s
, OPC_MOVDQU_VxWx
, ret
, 0, arg1
, arg2
);
1021 tcg_debug_assert(ret
>= 16);
1022 tcg_out_vex_modrm_offset(s
, OPC_MOVDQU_VxWx
| P_VEXL
,
1023 ret
, 0, arg1
, arg2
);
1026 g_assert_not_reached();
1030 static void tcg_out_st(TCGContext
*s
, TCGType type
, TCGReg arg
,
1031 TCGReg arg1
, intptr_t arg2
)
1036 tcg_out_modrm_offset(s
, OPC_MOVL_EvGv
, arg
, arg1
, arg2
);
1038 tcg_out_vex_modrm_offset(s
, OPC_MOVD_EyVy
, arg
, 0, arg1
, arg2
);
1043 tcg_out_modrm_offset(s
, OPC_MOVL_EvGv
| P_REXW
, arg
, arg1
, arg2
);
1048 tcg_debug_assert(arg
>= 16);
1049 tcg_out_vex_modrm_offset(s
, OPC_MOVQ_WqVq
, arg
, 0, arg1
, arg2
);
1052 tcg_debug_assert(arg
>= 16);
1053 tcg_out_vex_modrm_offset(s
, OPC_MOVDQU_WxVx
, arg
, 0, arg1
, arg2
);
1056 tcg_debug_assert(arg
>= 16);
1057 tcg_out_vex_modrm_offset(s
, OPC_MOVDQU_WxVx
| P_VEXL
,
1058 arg
, 0, arg1
, arg2
);
1061 g_assert_not_reached();
1065 static bool tcg_out_sti(TCGContext
*s
, TCGType type
, TCGArg val
,
1066 TCGReg base
, intptr_t ofs
)
1069 if (TCG_TARGET_REG_BITS
== 64 && type
== TCG_TYPE_I64
) {
1070 if (val
!= (int32_t)val
) {
1074 } else if (type
!= TCG_TYPE_I32
) {
1077 tcg_out_modrm_offset(s
, OPC_MOVL_EvIz
| rexw
, 0, base
, ofs
);
1082 static void tcg_out_shifti(TCGContext
*s
, int subopc
, int reg
, int count
)
1084 /* Propagate an opcode prefix, such as P_DATA16. */
1085 int ext
= subopc
& ~0x7;
1089 tcg_out_modrm(s
, OPC_SHIFT_1
+ ext
, subopc
, reg
);
1091 tcg_out_modrm(s
, OPC_SHIFT_Ib
+ ext
, subopc
, reg
);
1096 static inline void tcg_out_bswap32(TCGContext
*s
, int reg
)
1098 tcg_out_opc(s
, OPC_BSWAP
+ LOWREGMASK(reg
), 0, reg
, 0);
1101 static inline void tcg_out_rolw_8(TCGContext
*s
, int reg
)
1103 tcg_out_shifti(s
, SHIFT_ROL
+ P_DATA16
, reg
, 8);
1106 static inline void tcg_out_ext8u(TCGContext
*s
, int dest
, int src
)
1109 tcg_debug_assert(src
< 4 || TCG_TARGET_REG_BITS
== 64);
1110 tcg_out_modrm(s
, OPC_MOVZBL
+ P_REXB_RM
, dest
, src
);
1113 static void tcg_out_ext8s(TCGContext
*s
, int dest
, int src
, int rexw
)
1116 tcg_debug_assert(src
< 4 || TCG_TARGET_REG_BITS
== 64);
1117 tcg_out_modrm(s
, OPC_MOVSBL
+ P_REXB_RM
+ rexw
, dest
, src
);
1120 static inline void tcg_out_ext16u(TCGContext
*s
, int dest
, int src
)
1123 tcg_out_modrm(s
, OPC_MOVZWL
, dest
, src
);
1126 static inline void tcg_out_ext16s(TCGContext
*s
, int dest
, int src
, int rexw
)
1129 tcg_out_modrm(s
, OPC_MOVSWL
+ rexw
, dest
, src
);
1132 static inline void tcg_out_ext32u(TCGContext
*s
, int dest
, int src
)
1134 /* 32-bit mov zero extends. */
1135 tcg_out_modrm(s
, OPC_MOVL_GvEv
, dest
, src
);
1138 static inline void tcg_out_ext32s(TCGContext
*s
, int dest
, int src
)
1140 tcg_out_modrm(s
, OPC_MOVSLQ
, dest
, src
);
1143 static inline void tcg_out_bswap64(TCGContext
*s
, int reg
)
1145 tcg_out_opc(s
, OPC_BSWAP
+ P_REXW
+ LOWREGMASK(reg
), 0, reg
, 0);
1148 static void tgen_arithi(TCGContext
*s
, int c
, int r0
,
1149 tcg_target_long val
, int cf
)
1153 if (TCG_TARGET_REG_BITS
== 64) {
1158 /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
1159 partial flags update stalls on Pentium4 and are not recommended
1160 by current Intel optimization manuals. */
1161 if (!cf
&& (c
== ARITH_ADD
|| c
== ARITH_SUB
) && (val
== 1 || val
== -1)) {
1162 int is_inc
= (c
== ARITH_ADD
) ^ (val
< 0);
1163 if (TCG_TARGET_REG_BITS
== 64) {
1164 /* The single-byte increment encodings are re-tasked as the
1165 REX prefixes. Use the MODRM encoding. */
1166 tcg_out_modrm(s
, OPC_GRP5
+ rexw
,
1167 (is_inc
? EXT5_INC_Ev
: EXT5_DEC_Ev
), r0
);
1169 tcg_out8(s
, (is_inc
? OPC_INC_r32
: OPC_DEC_r32
) + r0
);
1174 if (c
== ARITH_AND
) {
1175 if (TCG_TARGET_REG_BITS
== 64) {
1176 if (val
== 0xffffffffu
) {
1177 tcg_out_ext32u(s
, r0
, r0
);
1180 if (val
== (uint32_t)val
) {
1181 /* AND with no high bits set can use a 32-bit operation. */
1185 if (val
== 0xffu
&& (r0
< 4 || TCG_TARGET_REG_BITS
== 64)) {
1186 tcg_out_ext8u(s
, r0
, r0
);
1189 if (val
== 0xffffu
) {
1190 tcg_out_ext16u(s
, r0
, r0
);
1195 if (val
== (int8_t)val
) {
1196 tcg_out_modrm(s
, OPC_ARITH_EvIb
+ rexw
, c
, r0
);
1200 if (rexw
== 0 || val
== (int32_t)val
) {
1201 tcg_out_modrm(s
, OPC_ARITH_EvIz
+ rexw
, c
, r0
);
1209 static void tcg_out_addi(TCGContext
*s
, int reg
, tcg_target_long val
)
1212 tgen_arithi(s
, ARITH_ADD
+ P_REXW
, reg
, val
, 0);
1216 /* Use SMALL != 0 to force a short forward branch. */
1217 static void tcg_out_jxx(TCGContext
*s
, int opc
, TCGLabel
*l
, int small
)
1222 val
= tcg_pcrel_diff(s
, l
->u
.value_ptr
);
1224 if ((int8_t)val1
== val1
) {
1226 tcg_out8(s
, OPC_JMP_short
);
1228 tcg_out8(s
, OPC_JCC_short
+ opc
);
1236 tcg_out8(s
, OPC_JMP_long
);
1237 tcg_out32(s
, val
- 5);
1239 tcg_out_opc(s
, OPC_JCC_long
+ opc
, 0, 0, 0);
1240 tcg_out32(s
, val
- 6);
1245 tcg_out8(s
, OPC_JMP_short
);
1247 tcg_out8(s
, OPC_JCC_short
+ opc
);
1249 tcg_out_reloc(s
, s
->code_ptr
, R_386_PC8
, l
, -1);
1253 tcg_out8(s
, OPC_JMP_long
);
1255 tcg_out_opc(s
, OPC_JCC_long
+ opc
, 0, 0, 0);
1257 tcg_out_reloc(s
, s
->code_ptr
, R_386_PC32
, l
, -4);
1262 static void tcg_out_cmp(TCGContext
*s
, TCGArg arg1
, TCGArg arg2
,
1263 int const_arg2
, int rexw
)
1268 tcg_out_modrm(s
, OPC_TESTL
+ rexw
, arg1
, arg1
);
1270 tgen_arithi(s
, ARITH_CMP
+ rexw
, arg1
, arg2
, 0);
1273 tgen_arithr(s
, ARITH_CMP
+ rexw
, arg1
, arg2
);
1277 static void tcg_out_brcond32(TCGContext
*s
, TCGCond cond
,
1278 TCGArg arg1
, TCGArg arg2
, int const_arg2
,
1279 TCGLabel
*label
, int small
)
1281 tcg_out_cmp(s
, arg1
, arg2
, const_arg2
, 0);
1282 tcg_out_jxx(s
, tcg_cond_to_jcc
[cond
], label
, small
);
1285 #if TCG_TARGET_REG_BITS == 64
1286 static void tcg_out_brcond64(TCGContext
*s
, TCGCond cond
,
1287 TCGArg arg1
, TCGArg arg2
, int const_arg2
,
1288 TCGLabel
*label
, int small
)
1290 tcg_out_cmp(s
, arg1
, arg2
, const_arg2
, P_REXW
);
1291 tcg_out_jxx(s
, tcg_cond_to_jcc
[cond
], label
, small
);
1294 /* XXX: we implement it at the target level to avoid having to
1295 handle cross basic blocks temporaries */
1296 static void tcg_out_brcond2(TCGContext
*s
, const TCGArg
*args
,
1297 const int *const_args
, int small
)
1299 TCGLabel
*label_next
= gen_new_label();
1300 TCGLabel
*label_this
= arg_label(args
[5]);
1304 tcg_out_brcond32(s
, TCG_COND_NE
, args
[0], args
[2], const_args
[2],
1306 tcg_out_brcond32(s
, TCG_COND_EQ
, args
[1], args
[3], const_args
[3],
1310 tcg_out_brcond32(s
, TCG_COND_NE
, args
[0], args
[2], const_args
[2],
1312 tcg_out_brcond32(s
, TCG_COND_NE
, args
[1], args
[3], const_args
[3],
1316 tcg_out_brcond32(s
, TCG_COND_LT
, args
[1], args
[3], const_args
[3],
1318 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1319 tcg_out_brcond32(s
, TCG_COND_LTU
, args
[0], args
[2], const_args
[2],
1323 tcg_out_brcond32(s
, TCG_COND_LT
, args
[1], args
[3], const_args
[3],
1325 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1326 tcg_out_brcond32(s
, TCG_COND_LEU
, args
[0], args
[2], const_args
[2],
1330 tcg_out_brcond32(s
, TCG_COND_GT
, args
[1], args
[3], const_args
[3],
1332 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1333 tcg_out_brcond32(s
, TCG_COND_GTU
, args
[0], args
[2], const_args
[2],
1337 tcg_out_brcond32(s
, TCG_COND_GT
, args
[1], args
[3], const_args
[3],
1339 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1340 tcg_out_brcond32(s
, TCG_COND_GEU
, args
[0], args
[2], const_args
[2],
1344 tcg_out_brcond32(s
, TCG_COND_LTU
, args
[1], args
[3], const_args
[3],
1346 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1347 tcg_out_brcond32(s
, TCG_COND_LTU
, args
[0], args
[2], const_args
[2],
1351 tcg_out_brcond32(s
, TCG_COND_LTU
, args
[1], args
[3], const_args
[3],
1353 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1354 tcg_out_brcond32(s
, TCG_COND_LEU
, args
[0], args
[2], const_args
[2],
1358 tcg_out_brcond32(s
, TCG_COND_GTU
, args
[1], args
[3], const_args
[3],
1360 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1361 tcg_out_brcond32(s
, TCG_COND_GTU
, args
[0], args
[2], const_args
[2],
1365 tcg_out_brcond32(s
, TCG_COND_GTU
, args
[1], args
[3], const_args
[3],
1367 tcg_out_jxx(s
, JCC_JNE
, label_next
, 1);
1368 tcg_out_brcond32(s
, TCG_COND_GEU
, args
[0], args
[2], const_args
[2],
1374 tcg_out_label(s
, label_next
, s
->code_ptr
);
1378 static void tcg_out_setcond32(TCGContext
*s
, TCGCond cond
, TCGArg dest
,
1379 TCGArg arg1
, TCGArg arg2
, int const_arg2
)
1381 tcg_out_cmp(s
, arg1
, arg2
, const_arg2
, 0);
1382 tcg_out_modrm(s
, OPC_SETCC
| tcg_cond_to_jcc
[cond
], 0, dest
);
1383 tcg_out_ext8u(s
, dest
, dest
);
1386 #if TCG_TARGET_REG_BITS == 64
1387 static void tcg_out_setcond64(TCGContext
*s
, TCGCond cond
, TCGArg dest
,
1388 TCGArg arg1
, TCGArg arg2
, int const_arg2
)
1390 tcg_out_cmp(s
, arg1
, arg2
, const_arg2
, P_REXW
);
1391 tcg_out_modrm(s
, OPC_SETCC
| tcg_cond_to_jcc
[cond
], 0, dest
);
1392 tcg_out_ext8u(s
, dest
, dest
);
1395 static void tcg_out_setcond2(TCGContext
*s
, const TCGArg
*args
,
1396 const int *const_args
)
1399 TCGLabel
*label_true
, *label_over
;
1401 memcpy(new_args
, args
+1, 5*sizeof(TCGArg
));
1403 if (args
[0] == args
[1] || args
[0] == args
[2]
1404 || (!const_args
[3] && args
[0] == args
[3])
1405 || (!const_args
[4] && args
[0] == args
[4])) {
1406 /* When the destination overlaps with one of the argument
1407 registers, don't do anything tricky. */
1408 label_true
= gen_new_label();
1409 label_over
= gen_new_label();
1411 new_args
[5] = label_arg(label_true
);
1412 tcg_out_brcond2(s
, new_args
, const_args
+1, 1);
1414 tcg_out_movi(s
, TCG_TYPE_I32
, args
[0], 0);
1415 tcg_out_jxx(s
, JCC_JMP
, label_over
, 1);
1416 tcg_out_label(s
, label_true
, s
->code_ptr
);
1418 tcg_out_movi(s
, TCG_TYPE_I32
, args
[0], 1);
1419 tcg_out_label(s
, label_over
, s
->code_ptr
);
1421 /* When the destination does not overlap one of the arguments,
1422 clear the destination first, jump if cond false, and emit an
1423 increment in the true case. This results in smaller code. */
1425 tcg_out_movi(s
, TCG_TYPE_I32
, args
[0], 0);
1427 label_over
= gen_new_label();
1428 new_args
[4] = tcg_invert_cond(new_args
[4]);
1429 new_args
[5] = label_arg(label_over
);
1430 tcg_out_brcond2(s
, new_args
, const_args
+1, 1);
1432 tgen_arithi(s
, ARITH_ADD
, args
[0], 1, 0);
1433 tcg_out_label(s
, label_over
, s
->code_ptr
);
1438 static void tcg_out_cmov(TCGContext
*s
, TCGCond cond
, int rexw
,
1439 TCGReg dest
, TCGReg v1
)
1442 tcg_out_modrm(s
, OPC_CMOVCC
| tcg_cond_to_jcc
[cond
] | rexw
, dest
, v1
);
1444 TCGLabel
*over
= gen_new_label();
1445 tcg_out_jxx(s
, tcg_cond_to_jcc
[tcg_invert_cond(cond
)], over
, 1);
1446 tcg_out_mov(s
, TCG_TYPE_I32
, dest
, v1
);
1447 tcg_out_label(s
, over
, s
->code_ptr
);
1451 static void tcg_out_movcond32(TCGContext
*s
, TCGCond cond
, TCGReg dest
,
1452 TCGReg c1
, TCGArg c2
, int const_c2
,
1455 tcg_out_cmp(s
, c1
, c2
, const_c2
, 0);
1456 tcg_out_cmov(s
, cond
, 0, dest
, v1
);
1459 #if TCG_TARGET_REG_BITS == 64
1460 static void tcg_out_movcond64(TCGContext
*s
, TCGCond cond
, TCGReg dest
,
1461 TCGReg c1
, TCGArg c2
, int const_c2
,
1464 tcg_out_cmp(s
, c1
, c2
, const_c2
, P_REXW
);
1465 tcg_out_cmov(s
, cond
, P_REXW
, dest
, v1
);
1469 static void tcg_out_ctz(TCGContext
*s
, int rexw
, TCGReg dest
, TCGReg arg1
,
1470 TCGArg arg2
, bool const_a2
)
1473 tcg_out_modrm(s
, OPC_TZCNT
+ rexw
, dest
, arg1
);
1475 tcg_debug_assert(arg2
== (rexw
? 64 : 32));
1477 tcg_debug_assert(dest
!= arg2
);
1478 tcg_out_cmov(s
, TCG_COND_LTU
, rexw
, dest
, arg2
);
1481 tcg_debug_assert(dest
!= arg2
);
1482 tcg_out_modrm(s
, OPC_BSF
+ rexw
, dest
, arg1
);
1483 tcg_out_cmov(s
, TCG_COND_EQ
, rexw
, dest
, arg2
);
1487 static void tcg_out_clz(TCGContext
*s
, int rexw
, TCGReg dest
, TCGReg arg1
,
1488 TCGArg arg2
, bool const_a2
)
1491 tcg_out_modrm(s
, OPC_LZCNT
+ rexw
, dest
, arg1
);
1493 tcg_debug_assert(arg2
== (rexw
? 64 : 32));
1495 tcg_debug_assert(dest
!= arg2
);
1496 tcg_out_cmov(s
, TCG_COND_LTU
, rexw
, dest
, arg2
);
1499 tcg_debug_assert(!const_a2
);
1500 tcg_debug_assert(dest
!= arg1
);
1501 tcg_debug_assert(dest
!= arg2
);
1503 /* Recall that the output of BSR is the index not the count. */
1504 tcg_out_modrm(s
, OPC_BSR
+ rexw
, dest
, arg1
);
1505 tgen_arithi(s
, ARITH_XOR
+ rexw
, dest
, rexw
? 63 : 31, 0);
1507 /* Since we have destroyed the flags from BSR, we have to re-test. */
1508 tcg_out_cmp(s
, arg1
, 0, 1, rexw
);
1509 tcg_out_cmov(s
, TCG_COND_EQ
, rexw
, dest
, arg2
);
static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
    intptr_t disp = tcg_pcrel_diff(s, dest) - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out32(s, disp);
    } else {
        /* rip-relative addressing into the constant pool.
           This is 6 + 8 = 14 bytes, as compared to using an
           immediate load 10 + 6 = 16 bytes, plus we may
           be able to re-use the pool constant for more calls.  */
        tcg_out_opc(s, OPC_GRP5, 0, 0, 0);
        tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5);
        new_pool_label(s, (uintptr_t)dest, R_386_PC32, s->code_ptr, -4);
    }
}

static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 1, dest);
}

static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 0, dest);
}
static void tcg_out_nopn(TCGContext *s, int n)
{
    int i;
    /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
     * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
     * duplicate prefix, and all of the interesting recent cores can
     * decode and discard the duplicates in a single cycle.
     */
    tcg_debug_assert(n >= 1);
    for (i = 1; i < n; ++i) {
        tcg_out8(s, 0x66);
    }
    tcg_out8(s, 0x90);
}
#if defined(CONFIG_SOFTMMU)
#include "tcg-ldst.inc.c"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
1586 /* Perform the TLB load and compare.
1589 ADDRLO and ADDRHI contain the low and high part of the address.
1591 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1593 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1594 This should be offsetof addr_read or addr_write.
1597 LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
1598 positions of the displacements of forward jumps to the TLB miss case.
1600 Second argument register is loaded with the low part of the address.
1601 In the TLB hit case, it has been adjusted as indicated by the TLB
1602 and so is a host address. In the TLB miss case, it continues to
1603 hold a guest address.
1605 First argument register is clobbered. */
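/*
 * Rough shape of the fast path emitted below (a sketch, eliding the 32-bit
 * and alignment variants): the guest address is copied, shifted right by
 * TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS and masked to form a byte offset
 * into env->tlb_table[mem_index]; the page-aligned address is compared
 * against the tag stored at that CPUTLBEntry slot (WHICH), and on a match
 * the entry's addend is added to produce the host address in TCG_REG_L1.
 */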
1607 static inline void tcg_out_tlb_load(TCGContext
*s
, TCGReg addrlo
, TCGReg addrhi
,
1608 int mem_index
, TCGMemOp opc
,
1609 tcg_insn_unit
**label_ptr
, int which
)
1611 const TCGReg r0
= TCG_REG_L0
;
1612 const TCGReg r1
= TCG_REG_L1
;
1613 TCGType ttype
= TCG_TYPE_I32
;
1614 TCGType tlbtype
= TCG_TYPE_I32
;
1615 int trexw
= 0, hrexw
= 0, tlbrexw
= 0;
1616 unsigned a_bits
= get_alignment_bits(opc
);
1617 unsigned s_bits
= opc
& MO_SIZE
;
1618 unsigned a_mask
= (1 << a_bits
) - 1;
1619 unsigned s_mask
= (1 << s_bits
) - 1;
1620 target_ulong tlb_mask
;
1622 if (TCG_TARGET_REG_BITS
== 64) {
1623 if (TARGET_LONG_BITS
== 64) {
1624 ttype
= TCG_TYPE_I64
;
1627 if (TCG_TYPE_PTR
== TCG_TYPE_I64
) {
1629 if (TARGET_PAGE_BITS
+ CPU_TLB_BITS
> 32) {
1630 tlbtype
= TCG_TYPE_I64
;
1636 tcg_out_mov(s
, tlbtype
, r0
, addrlo
);
1637 /* If the required alignment is at least as large as the access, simply
1638 copy the address and mask. For lesser alignments, check that we don't
1639 cross pages for the complete access. */
1640 if (a_bits
>= s_bits
) {
1641 tcg_out_mov(s
, ttype
, r1
, addrlo
);
1643 tcg_out_modrm_offset(s
, OPC_LEA
+ trexw
, r1
, addrlo
, s_mask
- a_mask
);
1645 tlb_mask
= (target_ulong
)TARGET_PAGE_MASK
| a_mask
;
1647 tcg_out_shifti(s
, SHIFT_SHR
+ tlbrexw
, r0
,
1648 TARGET_PAGE_BITS
- CPU_TLB_ENTRY_BITS
);
1650 tgen_arithi(s
, ARITH_AND
+ trexw
, r1
, tlb_mask
, 0);
1651 tgen_arithi(s
, ARITH_AND
+ tlbrexw
, r0
,
1652 (CPU_TLB_SIZE
- 1) << CPU_TLB_ENTRY_BITS
, 0);
1654 tcg_out_modrm_sib_offset(s
, OPC_LEA
+ hrexw
, r0
, TCG_AREG0
, r0
, 0,
1655 offsetof(CPUArchState
, tlb_table
[mem_index
][0])
1659 tcg_out_modrm_offset(s
, OPC_CMP_GvEv
+ trexw
, r1
, r0
, 0);
1661 /* Prepare for both the fast path add of the tlb addend, and the slow
1662 path function argument setup. There are two cases worth note:
1663 For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
1664 before the fastpath ADDQ below. For 64-bit guest and x32 host, MOVQ
1665 copies the entire guest address for the slow path, while truncation
1666 for the 32-bit host happens with the fastpath ADDL below. */
1667 tcg_out_mov(s
, ttype
, r1
, addrlo
);
1670 tcg_out_opc(s
, OPC_JCC_long
+ JCC_JNE
, 0, 0, 0);
1671 label_ptr
[0] = s
->code_ptr
;
1674 if (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
) {
1675 /* cmp 4(r0), addrhi */
1676 tcg_out_modrm_offset(s
, OPC_CMP_GvEv
, addrhi
, r0
, 4);
1679 tcg_out_opc(s
, OPC_JCC_long
+ JCC_JNE
, 0, 0, 0);
1680 label_ptr
[1] = s
->code_ptr
;
1686 /* add addend(r0), r1 */
1687 tcg_out_modrm_offset(s
, OPC_ADD_GvEv
+ hrexw
, r1
, r0
,
1688 offsetof(CPUTLBEntry
, addend
) - which
);
1692 * Record the context of a call to the out of line helper code for the slow path
1693 * for a load or store, so that we can later generate the correct helper code
1695 static void add_qemu_ldst_label(TCGContext
*s
, bool is_ld
, TCGMemOpIdx oi
,
1696 TCGReg datalo
, TCGReg datahi
,
1697 TCGReg addrlo
, TCGReg addrhi
,
1698 tcg_insn_unit
*raddr
,
1699 tcg_insn_unit
**label_ptr
)
1701 TCGLabelQemuLdst
*label
= new_ldst_label(s
);
1703 label
->is_ld
= is_ld
;
1705 label
->datalo_reg
= datalo
;
1706 label
->datahi_reg
= datahi
;
1707 label
->addrlo_reg
= addrlo
;
1708 label
->addrhi_reg
= addrhi
;
1709 label
->raddr
= raddr
;
1710 label
->label_ptr
[0] = label_ptr
[0];
1711 if (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
) {
1712 label
->label_ptr
[1] = label_ptr
[1];
1717 * Generate code for the slow path for a load at the end of block
1719 static void tcg_out_qemu_ld_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*l
)
1721 TCGMemOpIdx oi
= l
->oi
;
1722 TCGMemOp opc
= get_memop(oi
);
1724 tcg_insn_unit
**label_ptr
= &l
->label_ptr
[0];
1726 /* resolve label address */
1727 tcg_patch32(label_ptr
[0], s
->code_ptr
- label_ptr
[0] - 4);
1728 if (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
) {
1729 tcg_patch32(label_ptr
[1], s
->code_ptr
- label_ptr
[1] - 4);
1732 if (TCG_TARGET_REG_BITS
== 32) {
1735 tcg_out_st(s
, TCG_TYPE_PTR
, TCG_AREG0
, TCG_REG_ESP
, ofs
);
1738 tcg_out_st(s
, TCG_TYPE_I32
, l
->addrlo_reg
, TCG_REG_ESP
, ofs
);
1741 if (TARGET_LONG_BITS
== 64) {
1742 tcg_out_st(s
, TCG_TYPE_I32
, l
->addrhi_reg
, TCG_REG_ESP
, ofs
);
1746 tcg_out_sti(s
, TCG_TYPE_I32
, oi
, TCG_REG_ESP
, ofs
);
1749 tcg_out_sti(s
, TCG_TYPE_PTR
, (uintptr_t)l
->raddr
, TCG_REG_ESP
, ofs
);
1751 tcg_out_mov(s
, TCG_TYPE_PTR
, tcg_target_call_iarg_regs
[0], TCG_AREG0
);
1752 /* The second argument is already loaded with addrlo. */
1753 tcg_out_movi(s
, TCG_TYPE_I32
, tcg_target_call_iarg_regs
[2], oi
);
1754 tcg_out_movi(s
, TCG_TYPE_PTR
, tcg_target_call_iarg_regs
[3],
1755 (uintptr_t)l
->raddr
);
1758 tcg_out_call(s
, qemu_ld_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)]);
1760 data_reg
= l
->datalo_reg
;
1761 switch (opc
& MO_SSIZE
) {
1763 tcg_out_ext8s(s
, data_reg
, TCG_REG_EAX
, P_REXW
);
1766 tcg_out_ext16s(s
, data_reg
, TCG_REG_EAX
, P_REXW
);
1768 #if TCG_TARGET_REG_BITS == 64
1770 tcg_out_ext32s(s
, data_reg
, TCG_REG_EAX
);
1775 /* Note that the helpers have zero-extended to tcg_target_long. */
1777 tcg_out_mov(s
, TCG_TYPE_I32
, data_reg
, TCG_REG_EAX
);
1780 if (TCG_TARGET_REG_BITS
== 64) {
1781 tcg_out_mov(s
, TCG_TYPE_I64
, data_reg
, TCG_REG_RAX
);
1782 } else if (data_reg
== TCG_REG_EDX
) {
1783 /* xchg %edx, %eax */
1784 tcg_out_opc(s
, OPC_XCHG_ax_r32
+ TCG_REG_EDX
, 0, 0, 0);
1785 tcg_out_mov(s
, TCG_TYPE_I32
, l
->datahi_reg
, TCG_REG_EAX
);
1787 tcg_out_mov(s
, TCG_TYPE_I32
, data_reg
, TCG_REG_EAX
);
1788 tcg_out_mov(s
, TCG_TYPE_I32
, l
->datahi_reg
, TCG_REG_EDX
);
1795 /* Jump to the code corresponding to next IR of qemu_st */
1796 tcg_out_jmp(s
, l
->raddr
);
1800 * Generate code for the slow path for a store at the end of block
1802 static void tcg_out_qemu_st_slow_path(TCGContext
*s
, TCGLabelQemuLdst
*l
)
1804 TCGMemOpIdx oi
= l
->oi
;
1805 TCGMemOp opc
= get_memop(oi
);
1806 TCGMemOp s_bits
= opc
& MO_SIZE
;
1807 tcg_insn_unit
**label_ptr
= &l
->label_ptr
[0];
1810 /* resolve label address */
1811 tcg_patch32(label_ptr
[0], s
->code_ptr
- label_ptr
[0] - 4);
1812 if (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
) {
1813 tcg_patch32(label_ptr
[1], s
->code_ptr
- label_ptr
[1] - 4);
1816 if (TCG_TARGET_REG_BITS
== 32) {
1819 tcg_out_st(s
, TCG_TYPE_PTR
, TCG_AREG0
, TCG_REG_ESP
, ofs
);
1822 tcg_out_st(s
, TCG_TYPE_I32
, l
->addrlo_reg
, TCG_REG_ESP
, ofs
);
1825 if (TARGET_LONG_BITS
== 64) {
1826 tcg_out_st(s
, TCG_TYPE_I32
, l
->addrhi_reg
, TCG_REG_ESP
, ofs
);
1830 tcg_out_st(s
, TCG_TYPE_I32
, l
->datalo_reg
, TCG_REG_ESP
, ofs
);
1833 if (s_bits
== MO_64
) {
1834 tcg_out_st(s
, TCG_TYPE_I32
, l
->datahi_reg
, TCG_REG_ESP
, ofs
);
1838 tcg_out_sti(s
, TCG_TYPE_I32
, oi
, TCG_REG_ESP
, ofs
);
1841 retaddr
= TCG_REG_EAX
;
1842 tcg_out_movi(s
, TCG_TYPE_PTR
, retaddr
, (uintptr_t)l
->raddr
);
1843 tcg_out_st(s
, TCG_TYPE_PTR
, retaddr
, TCG_REG_ESP
, ofs
);
1845 tcg_out_mov(s
, TCG_TYPE_PTR
, tcg_target_call_iarg_regs
[0], TCG_AREG0
);
1846 /* The second argument is already loaded with addrlo. */
1847 tcg_out_mov(s
, (s_bits
== MO_64
? TCG_TYPE_I64
: TCG_TYPE_I32
),
1848 tcg_target_call_iarg_regs
[2], l
->datalo_reg
);
1849 tcg_out_movi(s
, TCG_TYPE_I32
, tcg_target_call_iarg_regs
[3], oi
);
1851 if (ARRAY_SIZE(tcg_target_call_iarg_regs
) > 4) {
1852 retaddr
= tcg_target_call_iarg_regs
[4];
1853 tcg_out_movi(s
, TCG_TYPE_PTR
, retaddr
, (uintptr_t)l
->raddr
);
1855 retaddr
= TCG_REG_RAX
;
1856 tcg_out_movi(s
, TCG_TYPE_PTR
, retaddr
, (uintptr_t)l
->raddr
);
1857 tcg_out_st(s
, TCG_TYPE_PTR
, retaddr
, TCG_REG_ESP
,
1858 TCG_TARGET_CALL_STACK_OFFSET
);
1862 /* "Tail call" to the helper, with the return address back inline. */
1863 tcg_out_push(s
, retaddr
);
1864 tcg_out_jmp(s
, qemu_st_helpers
[opc
& (MO_BSWAP
| MO_SIZE
)]);
#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
# include <sys/prctl.h>

int arch_prctl(int code, unsigned long addr);

static int guest_base_flags;
static inline void setup_guest_base_seg(void)
{
    if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
        guest_base_flags = P_GS;
    }
}
#else
# define guest_base_flags 0
static inline void setup_guest_base_seg(void) { }
#endif /* SOFTMMU */
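/*
 * With user-only emulation on x86-64 Linux, the guest base can be installed
 * as the %gs segment base via arch_prctl(ARCH_SET_GS); P_GS then turns each
 * guest memory access into a single %gs-prefixed instruction instead of
 * materialising the base address in a register first.
 */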
1884 static void tcg_out_qemu_ld_direct(TCGContext
*s
, TCGReg datalo
, TCGReg datahi
,
1885 TCGReg base
, int index
, intptr_t ofs
,
1886 int seg
, TCGMemOp memop
)
1888 const TCGMemOp real_bswap
= memop
& MO_BSWAP
;
1889 TCGMemOp bswap
= real_bswap
;
1890 int movop
= OPC_MOVL_GvEv
;
1892 if (have_movbe
&& real_bswap
) {
1894 movop
= OPC_MOVBE_GyMy
;
1897 switch (memop
& MO_SSIZE
) {
1899 tcg_out_modrm_sib_offset(s
, OPC_MOVZBL
+ seg
, datalo
,
1900 base
, index
, 0, ofs
);
1903 tcg_out_modrm_sib_offset(s
, OPC_MOVSBL
+ P_REXW
+ seg
, datalo
,
1904 base
, index
, 0, ofs
);
1907 tcg_out_modrm_sib_offset(s
, OPC_MOVZWL
+ seg
, datalo
,
1908 base
, index
, 0, ofs
);
1910 tcg_out_rolw_8(s
, datalo
);
1916 tcg_out_modrm_sib_offset(s
, OPC_MOVBE_GyMy
+ P_DATA16
+ seg
,
1917 datalo
, base
, index
, 0, ofs
);
1919 tcg_out_modrm_sib_offset(s
, OPC_MOVZWL
+ seg
, datalo
,
1920 base
, index
, 0, ofs
);
1921 tcg_out_rolw_8(s
, datalo
);
1923 tcg_out_modrm(s
, OPC_MOVSWL
+ P_REXW
, datalo
, datalo
);
1925 tcg_out_modrm_sib_offset(s
, OPC_MOVSWL
+ P_REXW
+ seg
,
1926 datalo
, base
, index
, 0, ofs
);
1930 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
, base
, index
, 0, ofs
);
1932 tcg_out_bswap32(s
, datalo
);
1935 #if TCG_TARGET_REG_BITS == 64
1938 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
,
1939 base
, index
, 0, ofs
);
1941 tcg_out_bswap32(s
, datalo
);
1943 tcg_out_ext32s(s
, datalo
, datalo
);
1945 tcg_out_modrm_sib_offset(s
, OPC_MOVSLQ
+ seg
, datalo
,
1946 base
, index
, 0, ofs
);
1951 if (TCG_TARGET_REG_BITS
== 64) {
1952 tcg_out_modrm_sib_offset(s
, movop
+ P_REXW
+ seg
, datalo
,
1953 base
, index
, 0, ofs
);
1955 tcg_out_bswap64(s
, datalo
);
1963 if (base
!= datalo
) {
1964 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
,
1965 base
, index
, 0, ofs
);
1966 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datahi
,
1967 base
, index
, 0, ofs
+ 4);
1969 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datahi
,
1970 base
, index
, 0, ofs
+ 4);
1971 tcg_out_modrm_sib_offset(s
, movop
+ seg
, datalo
,
1972 base
, index
, 0, ofs
);
1975 tcg_out_bswap32(s
, datalo
);
1976 tcg_out_bswap32(s
, datahi
);
1985 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
1986 EAX. It will be useful once fixed registers globals are less
1988 static void tcg_out_qemu_ld(TCGContext
*s
, const TCGArg
*args
, bool is64
)
1990 TCGReg datalo
, datahi
, addrlo
;
1991 TCGReg addrhi
__attribute__((unused
));
1994 #if defined(CONFIG_SOFTMMU)
1996 tcg_insn_unit
*label_ptr
[2];
2000 datahi
= (TCG_TARGET_REG_BITS
== 32 && is64
? *args
++ : 0);
2002 addrhi
= (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
? *args
++ : 0);
2004 opc
= get_memop(oi
);
2006 #if defined(CONFIG_SOFTMMU)
2007 mem_index
= get_mmuidx(oi
);
2009 tcg_out_tlb_load(s
, addrlo
, addrhi
, mem_index
, opc
,
2010 label_ptr
, offsetof(CPUTLBEntry
, addr_read
));
2013 tcg_out_qemu_ld_direct(s
, datalo
, datahi
, TCG_REG_L1
, -1, 0, 0, opc
);
2015 /* Record the current context of a load into ldst label */
2016 add_qemu_ldst_label(s
, true, oi
, datalo
, datahi
, addrlo
, addrhi
,
2017 s
->code_ptr
, label_ptr
);
2020 int32_t offset
= guest_base
;
2021 TCGReg base
= addrlo
;
2025 /* For a 32-bit guest, the high 32 bits may contain garbage.
2026 We can do this with the ADDR32 prefix if we're not using
2027 a guest base, or when using segmentation. Otherwise we
2028 need to zero-extend manually. */
2029 if (guest_base
== 0 || guest_base_flags
) {
2030 seg
= guest_base_flags
;
2032 if (TCG_TARGET_REG_BITS
> TARGET_LONG_BITS
) {
2035 } else if (TCG_TARGET_REG_BITS
== 64) {
2036 if (TARGET_LONG_BITS
== 32) {
2037 tcg_out_ext32u(s
, TCG_REG_L0
, base
);
2040 if (offset
!= guest_base
) {
2041 tcg_out_movi(s
, TCG_TYPE_I64
, TCG_REG_L1
, guest_base
);
2047 tcg_out_qemu_ld_direct(s
, datalo
, datahi
,
2048 base
, index
, offset
, seg
, opc
);
2053 static void tcg_out_qemu_st_direct(TCGContext
*s
, TCGReg datalo
, TCGReg datahi
,
2054 TCGReg base
, intptr_t ofs
, int seg
,
2057 /* ??? Ideally we wouldn't need a scratch register. For user-only,
2058 we could perform the bswap twice to restore the original value
2059 instead of moving to the scratch. But as it is, the L constraint
2060 means that TCG_REG_L0 is definitely free here. */
2061 const TCGReg scratch
= TCG_REG_L0
;
2062 const TCGMemOp real_bswap
= memop
& MO_BSWAP
;
2063 TCGMemOp bswap
= real_bswap
;
2064 int movop
= OPC_MOVL_EvGv
;
2066 if (have_movbe
&& real_bswap
) {
2068 movop
= OPC_MOVBE_MyGy
;
2071 switch (memop
& MO_SIZE
) {
2073 /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
2074 Use the scratch register if necessary. */
2075 if (TCG_TARGET_REG_BITS
== 32 && datalo
>= 4) {
2076 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
2079 tcg_out_modrm_offset(s
, OPC_MOVB_EvGv
+ P_REXB_R
+ seg
,
2084 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
2085 tcg_out_rolw_8(s
, scratch
);
2088 tcg_out_modrm_offset(s
, movop
+ P_DATA16
+ seg
, datalo
, base
, ofs
);
2092 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
2093 tcg_out_bswap32(s
, scratch
);
2096 tcg_out_modrm_offset(s
, movop
+ seg
, datalo
, base
, ofs
);
2099 if (TCG_TARGET_REG_BITS
== 64) {
2101 tcg_out_mov(s
, TCG_TYPE_I64
, scratch
, datalo
);
2102 tcg_out_bswap64(s
, scratch
);
2105 tcg_out_modrm_offset(s
, movop
+ P_REXW
+ seg
, datalo
, base
, ofs
);
2107 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datahi
);
2108 tcg_out_bswap32(s
, scratch
);
2109 tcg_out_modrm_offset(s
, OPC_MOVL_EvGv
+ seg
, scratch
, base
, ofs
);
2110 tcg_out_mov(s
, TCG_TYPE_I32
, scratch
, datalo
);
2111 tcg_out_bswap32(s
, scratch
);
2112 tcg_out_modrm_offset(s
, OPC_MOVL_EvGv
+ seg
, scratch
, base
, ofs
+4);
2119 tcg_out_modrm_offset(s
, movop
+ seg
, datalo
, base
, ofs
);
2120 tcg_out_modrm_offset(s
, movop
+ seg
, datahi
, base
, ofs
+4);
2128 static void tcg_out_qemu_st(TCGContext
*s
, const TCGArg
*args
, bool is64
)
2130 TCGReg datalo
, datahi
, addrlo
;
2131 TCGReg addrhi
__attribute__((unused
));
2134 #if defined(CONFIG_SOFTMMU)
2136 tcg_insn_unit
*label_ptr
[2];
2140 datahi
= (TCG_TARGET_REG_BITS
== 32 && is64
? *args
++ : 0);
2142 addrhi
= (TARGET_LONG_BITS
> TCG_TARGET_REG_BITS
? *args
++ : 0);
2144 opc
= get_memop(oi
);
2146 #if defined(CONFIG_SOFTMMU)
2147 mem_index
= get_mmuidx(oi
);
2149 tcg_out_tlb_load(s
, addrlo
, addrhi
, mem_index
, opc
,
2150 label_ptr
, offsetof(CPUTLBEntry
, addr_write
));
2153 tcg_out_qemu_st_direct(s
, datalo
, datahi
, TCG_REG_L1
, 0, 0, opc
);
2155 /* Record the current context of a store into ldst label */
2156 add_qemu_ldst_label(s
, false, oi
, datalo
, datahi
, addrlo
, addrhi
,
2157 s
->code_ptr
, label_ptr
);
2160 int32_t offset
= guest_base
;
2161 TCGReg base
= addrlo
;
2164 /* See comment in tcg_out_qemu_ld re zero-extension of addrlo. */
2165 if (guest_base
== 0 || guest_base_flags
) {
2166 seg
= guest_base_flags
;
2168 if (TCG_TARGET_REG_BITS
> TARGET_LONG_BITS
) {
2171 } else if (TCG_TARGET_REG_BITS
== 64) {
2172 /* ??? Note that we can't use the same SIB addressing scheme
2173 as for loads, since we require L0 free for bswap. */
2174 if (offset
!= guest_base
) {
2175 if (TARGET_LONG_BITS
== 32) {
2176 tcg_out_ext32u(s
, TCG_REG_L0
, base
);
2179 tcg_out_movi(s
, TCG_TYPE_I64
, TCG_REG_L1
, guest_base
);
2180 tgen_arithr(s
, ARITH_ADD
+ P_REXW
, TCG_REG_L1
, base
);
2183 } else if (TARGET_LONG_BITS
== 32) {
2184 tcg_out_ext32u(s
, TCG_REG_L1
, base
);
2189 tcg_out_qemu_st_direct(s
, datalo
, datahi
, base
, offset
, seg
, opc
);
2194 static inline void tcg_out_op(TCGContext
*s
, TCGOpcode opc
,
2195 const TCGArg
*args
, const int *const_args
)
2198 int c
, const_a2
, vexop
, rexw
= 0;
2200 #if TCG_TARGET_REG_BITS == 64
2201 # define OP_32_64(x) \
2202 case glue(glue(INDEX_op_, x), _i64): \
2203 rexw = P_REXW; /* FALLTHRU */ \
2204 case glue(glue(INDEX_op_, x), _i32)
2206 # define OP_32_64(x) \
2207 case glue(glue(INDEX_op_, x), _i32)
2210 /* Hoist the loads of the most common arguments. */
2214 const_a2
= const_args
[2];
2217 case INDEX_op_exit_tb
:
2218 /* Reuse the zeroing that exists for goto_ptr. */
2220 tcg_out_jmp(s
, s
->code_gen_epilogue
);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
            tcg_out_jmp(s, tb_ret_addr);
        }
        break;

    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            int gap;
            /* jump displacement must be aligned for atomic patching;
             * see if we need to add extra nops before jump
             */
            gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
            if (gap != 1) {
                tcg_out_nopn(s, gap - 1);
            }
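            /* The 32-bit displacement of the jmp emitted below starts at
               s->code_ptr + 1; padding with nops up to a 4-byte boundary
               keeps that displacement word naturally aligned, so the later
               retranslation patch can rewrite it with a single aligned
               32-bit store while other vCPUs may still be executing it.  */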
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (intptr_t)(s->tb_jmp_target_addr + a0));
        }
        set_jmp_reset_offset(s, a0);
        break;

    case INDEX_op_goto_ptr:
        /* jmp to the given host address (could be epilogue) */
        tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
        break;

    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
        break;

    OP_32_64(ld8u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
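        /* In long mode, any write to a 32-bit register zero-extends into the
           full 64-bit register, so the REX.W-less MOVZBL/MOVZWL forms also
           serve the _i64 variants of these zero-extending loads.  */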
        break;
    OP_32_64(ld8s):
        tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
        break;
    OP_32_64(ld16u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
        break;
    OP_32_64(ld16s):
        tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
        break;

    OP_32_64(st8):
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
            tcg_out8(s, a0);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
        }
        break;
    OP_32_64(st16):
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
            tcg_out16(s, a0);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
    case INDEX_op_st_i32:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
            tcg_out32(s, a0);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
        }
        break;

    OP_32_64(add):
        /* For 3-operand addition, use LEA.  */
        if (const_args[2] || a0 != a1) {
            TCGArg c3 = 0;
            if (const_a2) {
                c3 = a2, a2 = -1;
            } else if (a0 == a2) {
                /* Watch out for dest = src + dest, since we've removed
                   the matching constraint on the add.  */
                tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
                break;
            }
            tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
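            /* Illustration of the LEA forms used here: "add d, s, $imm" with
               d != s becomes "lea imm(s), d", and register-register
               "add d, s1, s2" becomes "lea (s1, s2), d"; both leave the
               source registers (and the flags) untouched.  */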
            break;
        }
        c = ARITH_ADD;
        goto gen_arith;

    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    gen_arith:
        if (const_a2) {
            tgen_arithi(s, c + rexw, a0, a2, 0);
        } else {
            tgen_arithr(s, c + rexw, a0, a2);
        }
        break;

    OP_32_64(andc):
        if (const_a2) {
            tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
            tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
        } else {
            tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
        }
        break;

    OP_32_64(mul):
        if (const_a2) {
            int32_t val;
            val = a2;
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
        }
        break;

    OP_32_64(div2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
        break;
    OP_32_64(divu2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
        break;

    OP_32_64(shl):
        /* For small constant 3-operand shift, use LEA.  */
        if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
            if (a2 - 1 == 0) {
                /* shl $1,a1,a0 -> lea (a1,a1),a0 */
                tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
            } else {
                /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
                tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
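                /* The SIB scale field only encodes factors of 2, 4 and 8,
                   which is why this path is limited to shift counts 1..3;
                   the count-of-1 case uses the base+index form above, which
                   also avoids the 32-bit displacement an index-only SIB
                   encoding would require.  */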
            }
            break;
        }
        c = SHIFT_SHL;
        vexop = OPC_SHLX;
        goto gen_shift_maybe_vex;
    OP_32_64(shr):
        c = SHIFT_SHR;
        vexop = OPC_SHRX;
        goto gen_shift_maybe_vex;
    OP_32_64(sar):
        c = SHIFT_SAR;
        vexop = OPC_SARX;
        goto gen_shift_maybe_vex;
    OP_32_64(rotl):
        c = SHIFT_ROL;
        goto gen_shift;
    OP_32_64(rotr):
        c = SHIFT_ROR;
        goto gen_shift;
    gen_shift_maybe_vex:
        if (have_bmi2) {
            if (!const_a2) {
                tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
                break;
            }
            tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
        }
        /* FALLTHRU */
    gen_shift:
        if (const_a2) {
            tcg_out_shifti(s, c + rexw, a0, a2);
        } else {
            tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
        }
        break;

    OP_32_64(ctz):
        tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
        break;
    OP_32_64(clz):
        tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
        break;
    OP_32_64(ctpop):
        tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
        break;

    OP_32_64(bswap16):
        tcg_out_rolw_8(s, a0);
        break;
    OP_32_64(bswap32):
        tcg_out_bswap32(s, a0);
        break;

    OP_32_64(neg):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
        break;
    OP_32_64(not):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
        break;

    OP_32_64(ext8s):
        tcg_out_ext8s(s, a0, a1, rexw);
        break;
    OP_32_64(ext16s):
        tcg_out_ext16s(s, a0, a1, rexw);
        break;
    OP_32_64(ext8u):
        tcg_out_ext8u(s, a0, a1);
        break;
    OP_32_64(ext16u):
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    OP_32_64(mulu2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
        break;
    OP_32_64(muls2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
        break;
    OP_32_64(add2):
        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
        } else {
            tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
        } else {
            tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
        }
        break;
    OP_32_64(sub2):
        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
        } else {
            tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
        }
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
        } else {
            tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
        }
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;
#else /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_ld32s_i64:
        tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
            tcg_out32(s, a0);
        } else {
            tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, a0);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, a0, a1);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, a0, a1);
        break;
#endif

    OP_32_64(deposit):
        if (args[3] == 0 && args[4] == 8) {
            /* load bits 0..7 */
            tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
        } else if (args[3] == 8 && args[4] == 8) {
            /* load bits 8..15 */
            tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
        } else if (args[3] == 0 && args[4] == 16) {
            /* load bits 0..15 */
            tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
        } else {
            tcg_abort();
        }
        break;

    case INDEX_op_extract_i64:
        if (a2 + args[3] == 32) {
            /* This is a 32-bit zero-extending right shift.  */
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tcg_out_shifti(s, SHIFT_SHR, a0, a2);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_extract_i32:
        /* On the off-chance that we can use the high-byte registers.
           Otherwise we emit the same ext16 + shift pattern that we
           would have gotten from the normal tcg-op.c expansion.  */
        tcg_debug_assert(a2 == 8 && args[3] == 8);
        if (a1 < 4 && a0 < 8) {
            tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
        } else {
            tcg_out_ext16u(s, a0, a1);
            tcg_out_shifti(s, SHIFT_SHR, a0, 8);
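            /* For example, "extract_i32 d, s, 8, 8" with s in EAX/ECX/EDX/EBX
               and a destination that needs no REX prefix can be a single
               movzbl from AH/CH/DH/BH (encoded as s + 4); any other operands
               fall back to the movzwl + shr 8 sequence above, the same
               expansion tcg-op.c would have produced.  */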
        }
        break;

    case INDEX_op_sextract_i32:
        /* We don't implement sextract_i64, as we cannot sign-extend to
           64-bits without using the REX prefix that explicitly excludes
           access to the high-byte registers.  */
        tcg_debug_assert(a2 == 8 && args[3] == 8);
        if (a1 < 4 && a0 < 8) {
            tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
        } else {
            tcg_out_ext16s(s, a0, a1, 0);
            tcg_out_shifti(s, SHIFT_SAR, a0, 8);
        }
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;
    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_mov_vec:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_dupi_vec:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }

#undef OP_32_64
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg *args, const int *const_args)
{
    static int const add_insn[4] = {
        OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ
    };
    static int const sub_insn[4] = {
        OPC_PSUBB, OPC_PSUBW, OPC_PSUBD, OPC_PSUBQ
    };
    static int const mul_insn[4] = {
        OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2
    };
    static int const shift_imm_insn[4] = {
        OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
    };
    static int const cmpeq_insn[4] = {
        OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ
    };
    static int const cmpgt_insn[4] = {
        OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ
    };
    static int const punpckl_insn[4] = {
        OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ
    };
    static int const punpckh_insn[4] = {
        OPC_PUNPCKHBW, OPC_PUNPCKHWD, OPC_PUNPCKHDQ, OPC_PUNPCKHQDQ
    };
    static int const packss_insn[4] = {
        OPC_PACKSSWB, OPC_PACKSSDW, OPC_UD2, OPC_UD2
    };
    static int const packus_insn[4] = {
        OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2
    };

    TCGType type = vecl + TCG_TYPE_V64;
    int insn, sub;
    TCGArg a0, a1, a2;

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    switch (opc) {
    case INDEX_op_add_vec:
        insn = add_insn[vece];
        goto gen_simd;
    case INDEX_op_sub_vec:
        insn = sub_insn[vece];
        goto gen_simd;
    case INDEX_op_mul_vec:
        insn = mul_insn[vece];
        goto gen_simd;
    case INDEX_op_and_vec:
        insn = OPC_PAND;
        goto gen_simd;
    case INDEX_op_or_vec:
        insn = OPC_POR;
        goto gen_simd;
    case INDEX_op_xor_vec:
        insn = OPC_PXOR;
        goto gen_simd;
    case INDEX_op_x86_punpckl_vec:
        insn = punpckl_insn[vece];
        goto gen_simd;
    case INDEX_op_x86_punpckh_vec:
        insn = punpckh_insn[vece];
        goto gen_simd;
    case INDEX_op_x86_packss_vec:
        insn = packss_insn[vece];
        goto gen_simd;
    case INDEX_op_x86_packus_vec:
        insn = packus_insn[vece];
        goto gen_simd;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_dup2_vec:
        /* Constraints have already placed both 32-bit inputs in xmm regs.  */
        insn = OPC_PUNPCKLDQ;
        goto gen_simd;
#endif
    gen_simd:
        tcg_debug_assert(insn != OPC_UD2);
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, a0, a1, a2);
        break;

    case INDEX_op_cmp_vec:
        sub = args[3];
        if (sub == TCG_COND_EQ) {
            insn = cmpeq_insn[vece];
        } else if (sub == TCG_COND_GT) {
            insn = cmpgt_insn[vece];
        } else {
            g_assert_not_reached();
        }
        goto gen_simd;

    case INDEX_op_andc_vec:
        insn = OPC_PANDN;
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, a0, a2, a1);
        break;

    case INDEX_op_shli_vec:
        sub = 6;
        goto gen_shift;
    case INDEX_op_shri_vec:
        sub = 2;
        goto gen_shift;
    case INDEX_op_sari_vec:
        tcg_debug_assert(vece != MO_64);
        sub = 4;
    gen_shift:
        tcg_debug_assert(vece != MO_8);
        insn = shift_imm_insn[vece];
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, sub, a0, a1);
        tcg_out8(s, a2);
        break;

    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_dup_vec:
        tcg_out_dup_vec(s, type, vece, a0, a1);
        break;

    case INDEX_op_x86_shufps_vec:
        insn = OPC_SHUFPS;
        sub = args[3];
        goto gen_simd_imm8;
    case INDEX_op_x86_blend_vec:
        if (vece == MO_16) {
            insn = OPC_PBLENDW;
        } else if (vece == MO_32) {
            insn = (have_avx2 ? OPC_VPBLENDD : OPC_BLENDPS);
        } else {
            g_assert_not_reached();
        }
        sub = args[3];
        goto gen_simd_imm8;
    case INDEX_op_x86_vperm2i128_vec:
        insn = OPC_VPERM2I128;
        sub = args[3];
        goto gen_simd_imm8;
    gen_simd_imm8:
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, a0, a1, a2);
        tcg_out8(s, sub);
        break;

    case INDEX_op_x86_vpblendvb_vec:
        insn = OPC_VPBLENDVB;
        if (type == TCG_TYPE_V256) {
            insn |= P_VEXL;
        }
        tcg_out_vex_modrm(s, insn, a0, a1, a2);
        tcg_out8(s, args[3] << 4);
        break;

    case INDEX_op_x86_psrldq_vec:
        tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1);
        tcg_out8(s, a2);
        break;

    default:
        g_assert_not_reached();
    }
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } };
    static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } };
    static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
    static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
    static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
    static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
    static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
    static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
    static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
    static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
    static const TCGTargetOpDef r_r_L_L
        = { .args_ct_str = { "r", "r", "L", "L" } };
    static const TCGTargetOpDef L_L_L_L
        = { .args_ct_str = { "L", "L", "L", "L" } };
    static const TCGTargetOpDef x_x = { .args_ct_str = { "x", "x" } };
    static const TCGTargetOpDef x_x_x = { .args_ct_str = { "x", "x", "x" } };
    static const TCGTargetOpDef x_x_x_x
        = { .args_ct_str = { "x", "x", "x", "x" } };
    static const TCGTargetOpDef x_r = { .args_ct_str = { "x", "r" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
        return &r_r;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        return &qi_r;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        return &ri_r;
    case INDEX_op_st_i64:
        return &re_r;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
        return &r_r_re;
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        return &r_0_re;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        {
            static const TCGTargetOpDef and
                = { .args_ct_str = { "r", "0", "reZ" } };
            return &and;
        }
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        {
            static const TCGTargetOpDef andc
                = { .args_ct_str = { "r", "r", "rI" } };
            return &andc;
        }

    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
        return have_bmi2 ? &r_r_ri : &r_0_ci;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return &r_0_ci;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &r_re;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        return &r_0;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        return &r_q;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return &r_r;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        {
            static const TCGTargetOpDef dep
                = { .args_ct_str = { "Q", "0", "Q" } };
            return &dep;
        }
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        {
            static const TCGTargetOpDef setc
                = { .args_ct_str = { "q", "r", "re" } };
            return &setc;
        }
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        {
            static const TCGTargetOpDef movc
                = { .args_ct_str = { "r", "r", "re", "r", "0" } };
            return &movc;
        }
    case INDEX_op_div2_i32:
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i32:
    case INDEX_op_divu2_i64:
        {
            static const TCGTargetOpDef div2
                = { .args_ct_str = { "a", "d", "0", "1", "r" } };
            return &div2;
        }
    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        {
            static const TCGTargetOpDef mul2
                = { .args_ct_str = { "a", "d", "a", "r" } };
            return &mul2;
        }
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        {
            static const TCGTargetOpDef arith2
                = { .args_ct_str = { "r", "r", "0", "1", "re", "re" } };
            return &arith2;
        }
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        {
            static const TCGTargetOpDef ctz[2] = {
                { .args_ct_str = { "&r", "r", "r" } },
                { .args_ct_str = { "&r", "r", "rW" } },
            };
            return &ctz[have_bmi1];
        }
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
        {
            static const TCGTargetOpDef clz[2] = {
                { .args_ct_str = { "&r", "r", "r" } },
                { .args_ct_str = { "&r", "r", "rW" } },
            };
            return &clz[have_lzcnt];
        }

    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L;
    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? &r_L
                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
                : &r_r_L_L);
    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? &L_L
                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L_L
                : &L_L_L_L);

    case INDEX_op_brcond2_i32:
        {
            static const TCGTargetOpDef b2
                = { .args_ct_str = { "r", "r", "ri", "ri" } };
            return &b2;
        }
    case INDEX_op_setcond2_i32:
        {
            static const TCGTargetOpDef s2
                = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
            return &s2;
        }

    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
        return &x_r;

    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_x86_shufps_vec:
    case INDEX_op_x86_blend_vec:
    case INDEX_op_x86_packss_vec:
    case INDEX_op_x86_packus_vec:
    case INDEX_op_x86_vperm2i128_vec:
    case INDEX_op_x86_punpckl_vec:
    case INDEX_op_x86_punpckh_vec:
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_dup2_vec:
#endif
        return &x_x_x;
    case INDEX_op_dup_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_x86_psrldq_vec:
        return &x_x;
    case INDEX_op_x86_vpblendvb_vec:
        return &x_x_x_x;

    default:
        break;
    }
    return NULL;
}
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
        return 1;
    case INDEX_op_cmp_vec:
        return -1;

    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
        /* We must expand the operation for MO_8.  */
        return vece == MO_8 ? -1 : 1;

    case INDEX_op_sari_vec:
        /* We must expand the operation for MO_8.  */
        if (vece == MO_8) {
            return -1;
        }
        /* We can emulate this for MO_64, but it does not pay off
           unless we're producing at least 4 values.  */
        if (vece == MO_64) {
            return type >= TCG_TYPE_V256 ? -1 : 0;
        }
        return 1;

    case INDEX_op_mul_vec:
        if (vece == MO_8) {
            /* We can expand the operation for MO_8.  */
            return -1;
        }
        if (vece == MO_64) {
            return 0;
        }
        return 1;

    default:
        return 0;
    }
}
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGArg a1, a2;
    TCGv_vec v0, t1, t2, t3, t4;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));

    switch (opc) {
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
        tcg_debug_assert(vece == MO_8);
        a1 = va_arg(va, TCGArg);
        a2 = va_arg(va, TCGArg);
        /* Unpack to W, shift, and repack.  Tricky bits:
           (1) Use punpck*bw x,x to produce DDCCBBAA,
               i.e. duplicate in other half of the 16-bit lane.
           (2) For right-shift, add 8 so that the high half of
               the lane becomes zero.  For left-shift, we must
               shift up and down again.
           (3) Step 2 leaves high half zero such that PACKUSWB
               (pack with unsigned saturation) does not modify
               the quantity.  */
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
                  tcgv_vec_arg(t1), a1, a1);
        vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
                  tcgv_vec_arg(t2), a1, a1);
        if (opc == INDEX_op_shri_vec) {
            vec_gen_3(INDEX_op_shri_vec, type, MO_16,
                      tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
            vec_gen_3(INDEX_op_shri_vec, type, MO_16,
                      tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
        } else {
            vec_gen_3(INDEX_op_shli_vec, type, MO_16,
                      tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
            vec_gen_3(INDEX_op_shli_vec, type, MO_16,
                      tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
            vec_gen_3(INDEX_op_shri_vec, type, MO_16,
                      tcgv_vec_arg(t1), tcgv_vec_arg(t1), 8);
            vec_gen_3(INDEX_op_shri_vec, type, MO_16,
                      tcgv_vec_arg(t2), tcgv_vec_arg(t2), 8);
        }
        vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
                  a0, tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
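        /* Worked example for one 16-bit lane holding byte b: punpck*bw x,x
           yields the lane value (b << 8) | b.  A right shift by a2 + 8 then
           leaves (b >> a2) in the low byte with the high byte zero; for a
           left shift, shifting up by a2 + 8 and back down by 8 leaves
           (b << a2) & 0xff with the high byte zero.  Because every high byte
           is zero, PACKUSWB's unsigned saturation never triggers and it
           simply re-packs the low bytes.  */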
        break;

    case INDEX_op_sari_vec:
        a1 = va_arg(va, TCGArg);
        a2 = va_arg(va, TCGArg);
        if (vece == MO_8) {
            /* Unpack to W, shift, and repack, as above.  */
            t1 = tcg_temp_new_vec(type);
            t2 = tcg_temp_new_vec(type);
            vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
                      tcgv_vec_arg(t1), a1, a1);
            vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
                      tcgv_vec_arg(t2), a1, a1);
            vec_gen_3(INDEX_op_sari_vec, type, MO_16,
                      tcgv_vec_arg(t1), tcgv_vec_arg(t1), a2 + 8);
            vec_gen_3(INDEX_op_sari_vec, type, MO_16,
                      tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2 + 8);
            vec_gen_3(INDEX_op_x86_packss_vec, type, MO_8,
                      a0, tcgv_vec_arg(t1), tcgv_vec_arg(t2));
            tcg_temp_free_vec(t1);
            tcg_temp_free_vec(t2);
            break;
        }

        tcg_debug_assert(vece == MO_64);
        /* MO_64: If the shift is <= 32, we can emulate the sign extend by
           performing an arithmetic 32-bit shift and overwriting the high
           half of the result (note that the ISA says shift of 32 is valid).  */
        if (a2 <= 32) {
            t1 = tcg_temp_new_vec(type);
            vec_gen_3(INDEX_op_sari_vec, type, MO_32, tcgv_vec_arg(t1), a1, a2);
            vec_gen_3(INDEX_op_shri_vec, type, MO_64, a0, a1, a2);
            vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
                      a0, a0, tcgv_vec_arg(t1), 0xaa);
            tcg_temp_free_vec(t1);
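            /* This works because, for a shift count of at most 32, all of
               the replicated sign bits come from the upper 32-bit half: the
               32-bit arithmetic shift produces the correct upper dword, the
               64-bit logical shift produces the correct lower dword, and
               the blend immediate 0xaa (binary 10101010) selects the odd
               dwords, i.e. the high half of each 64-bit lane, from the
               arithmetically shifted copy in t1.  */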
            break;
        }

        /* Otherwise we will need to use a compare vs 0 to produce the
           sign-extend, shift and merge.  */
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_const_zeros_vec(type);
        vec_gen_4(INDEX_op_cmp_vec, type, MO_64,
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2), a1, TCG_COND_GT);
        tcg_temp_free_vec(t2);
        vec_gen_3(INDEX_op_shri_vec, type, MO_64, a0, a1, a2);
        vec_gen_3(INDEX_op_shli_vec, type, MO_64,
                  tcgv_vec_arg(t1), tcgv_vec_arg(t1), 64 - a2);
        vec_gen_3(INDEX_op_or_vec, type, MO_64, a0, a0, tcgv_vec_arg(t1));
        tcg_temp_free_vec(t1);
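        /* Here t1 is all-ones in every lane whose input was negative (from
           the 0 > a1 comparison).  Shifting that mask left by 64 - a2 moves
           the ones into exactly the bit positions the logical right shift
           left as zero, so the final OR reconstructs the sign extension.  */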
        break;

    case INDEX_op_mul_vec:
        tcg_debug_assert(vece == MO_8);
        a1 = va_arg(va, TCGArg);
        a2 = va_arg(va, TCGArg);
        switch (type) {
        case TCG_TYPE_V64:
            t1 = tcg_temp_new_vec(TCG_TYPE_V128);
            t2 = tcg_temp_new_vec(TCG_TYPE_V128);
            tcg_gen_dup16i_vec(t2, 0);
            vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
                      tcgv_vec_arg(t1), a1, tcgv_vec_arg(t2));
            vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
                      tcgv_vec_arg(t2), tcgv_vec_arg(t2), a2);
            tcg_gen_mul_vec(MO_16, t1, t1, t2);
            tcg_gen_shri_vec(MO_16, t1, t1, 8);
            vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
                      a0, tcgv_vec_arg(t1), tcgv_vec_arg(t1));
            tcg_temp_free_vec(t1);
            tcg_temp_free_vec(t2);
            break;

        case TCG_TYPE_V128:
            t1 = tcg_temp_new_vec(TCG_TYPE_V128);
            t2 = tcg_temp_new_vec(TCG_TYPE_V128);
            t3 = tcg_temp_new_vec(TCG_TYPE_V128);
            t4 = tcg_temp_new_vec(TCG_TYPE_V128);
            tcg_gen_dup16i_vec(t4, 0);
            vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
                      tcgv_vec_arg(t1), a1, tcgv_vec_arg(t4));
            vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
                      tcgv_vec_arg(t2), tcgv_vec_arg(t4), a2);
            vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V128, MO_8,
                      tcgv_vec_arg(t3), a1, tcgv_vec_arg(t4));
            vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V128, MO_8,
                      tcgv_vec_arg(t4), tcgv_vec_arg(t4), a2);
            tcg_gen_mul_vec(MO_16, t1, t1, t2);
            tcg_gen_mul_vec(MO_16, t3, t3, t4);
            tcg_gen_shri_vec(MO_16, t1, t1, 8);
            tcg_gen_shri_vec(MO_16, t3, t3, 8);
            vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
                      a0, tcgv_vec_arg(t1), tcgv_vec_arg(t3));
            tcg_temp_free_vec(t1);
            tcg_temp_free_vec(t2);
            tcg_temp_free_vec(t3);
            tcg_temp_free_vec(t4);
            break;

        case TCG_TYPE_V256:
            t1 = tcg_temp_new_vec(TCG_TYPE_V256);
            t2 = tcg_temp_new_vec(TCG_TYPE_V256);
            t3 = tcg_temp_new_vec(TCG_TYPE_V256);
            t4 = tcg_temp_new_vec(TCG_TYPE_V256);
            tcg_gen_dup16i_vec(t4, 0);
            /* a1: A[0-7] ... D[0-7]; a2: W[0-7] ... Z[0-7]
               t1: extends of B[0-7], D[0-7]
               t2: extends of X[0-7], Z[0-7]
               t3: extends of A[0-7], C[0-7]
               t4: extends of W[0-7], Y[0-7].  */
            vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V256, MO_8,
                      tcgv_vec_arg(t1), a1, tcgv_vec_arg(t4));
            vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V256, MO_8,
                      tcgv_vec_arg(t2), tcgv_vec_arg(t4), a2);
            vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V256, MO_8,
                      tcgv_vec_arg(t3), a1, tcgv_vec_arg(t4));
            vec_gen_3(INDEX_op_x86_punpckh_vec, TCG_TYPE_V256, MO_8,
                      tcgv_vec_arg(t4), tcgv_vec_arg(t4), a2);
            /* t1: BX DZ; t2: AW CY.  */
            tcg_gen_mul_vec(MO_16, t1, t1, t2);
            tcg_gen_mul_vec(MO_16, t3, t3, t4);
            tcg_gen_shri_vec(MO_16, t1, t1, 8);
            tcg_gen_shri_vec(MO_16, t3, t3, 8);
            /* a0: AW BX CY DZ.  */
            vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V256, MO_8,
                      a0, tcgv_vec_arg(t1), tcgv_vec_arg(t3));
            tcg_temp_free_vec(t1);
            tcg_temp_free_vec(t2);
            tcg_temp_free_vec(t3);
            tcg_temp_free_vec(t4);
            break;

        default:
            g_assert_not_reached();
        }
        break;
    case INDEX_op_cmp_vec:
        {
            enum {
                NEED_SWAP = 1,
                NEED_INV  = 2,
                NEED_BIAS = 4
            };
            static const uint8_t fixups[16] = {
                [0 ... 15] = -1,
                [TCG_COND_EQ] = 0,
                [TCG_COND_NE] = NEED_INV,
                [TCG_COND_GT] = 0,
                [TCG_COND_LT] = NEED_SWAP,
                [TCG_COND_LE] = NEED_INV,
                [TCG_COND_GE] = NEED_SWAP | NEED_INV,
                [TCG_COND_GTU] = NEED_BIAS,
                [TCG_COND_LTU] = NEED_BIAS | NEED_SWAP,
                [TCG_COND_LEU] = NEED_BIAS | NEED_INV,
                [TCG_COND_GEU] = NEED_BIAS | NEED_SWAP | NEED_INV,
            };

            TCGCond cond;
            uint8_t fixup;

            a1 = va_arg(va, TCGArg);
            a2 = va_arg(va, TCGArg);
            cond = va_arg(va, TCGArg);
            fixup = fixups[cond & 15];
            tcg_debug_assert(fixup != 0xff);

            if (fixup & NEED_INV) {
                cond = tcg_invert_cond(cond);
            }
            if (fixup & NEED_SWAP) {
                TCGArg t;
                t = a1, a1 = a2, a2 = t;
                cond = tcg_swap_cond(cond);
            }

            if (fixup & NEED_BIAS) {
                t1 = tcg_temp_new_vec(type);
                t2 = tcg_temp_new_vec(type);
                tcg_gen_dupi_vec(vece, t2, 1ull << ((8 << vece) - 1));
                tcg_gen_sub_vec(vece, t1, temp_tcgv_vec(arg_temp(a1)), t2);
                tcg_gen_sub_vec(vece, t2, temp_tcgv_vec(arg_temp(a2)), t2);
                a1 = tcgv_vec_arg(t1);
                a2 = tcgv_vec_arg(t2);
                cond = tcg_signed_cond(cond);
            }

            tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT);
            vec_gen_4(INDEX_op_cmp_vec, type, vece, a0, a1, a2, cond);

            if (fixup & NEED_BIAS) {
                tcg_temp_free_vec(t1);
                tcg_temp_free_vec(t2);
            }
            if (fixup & NEED_INV) {
                tcg_gen_not_vec(vece, v0, v0);
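                /* Example: "cmp_vec LTU x, y" has fixup
                   NEED_BIAS | NEED_SWAP.  Subtracting the sign-bit constant
                   from both operands converts the unsigned ordering into the
                   signed one, and swapping the operands turns the less-than
                   into the greater-than that PCMPGT provides; conditions
                   flagged NEED_INV additionally complement the
                   all-ones/all-zeros result with the not above.  */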
            }
        }
        break;
    }

    va_end(va);
}

static const int tcg_target_callee_save_regs[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
#if defined(_WIN64)
    TCG_REG_RDI,
    TCG_REG_RSI,
#endif
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14, /* Currently used for the global env. */
    TCG_REG_R15,
#else
    TCG_REG_EBP, /* Currently used for the global env. */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
#endif
};

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE \
    ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
     * (TCG_TARGET_REG_BITS / 8))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))
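/* The frame therefore consists of the pushed return address plus the
   callee-saved registers (PUSH_SIZE), the slots reserved for arguments of
   helper calls (TCG_STATIC_CALL_ARGS_SIZE) and the TCG temporary buffer,
   rounded up to the stack alignment.  For example, a 64-bit non-Windows
   build saves 6 registers, so PUSH_SIZE is (1 + 6) * 8 = 56 bytes.  */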
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i, stack_addend;

    /* Reserve some stack space, also for TCG temps. */
    stack_addend = FRAME_SIZE - PUSH_SIZE;
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Save all callee saved registers. */
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);
    }

#if TCG_TARGET_REG_BITS == 32
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
               (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb.  */
    tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
                         (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
                         + stack_addend);
#else
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    /* jmp *tb.  */
    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
#endif

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;

    tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);

    if (have_avx2) {
        tcg_out_vex_opc(s, OPC_VZEROUPPER, 0, 0, 0, 0);
    }
    for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    }
    tcg_out_opc(s, OPC_RET, 0, 0, 0);

#if !defined(CONFIG_SOFTMMU)
    /* Try to set up a segment register to point to guest_base.  */
    if (guest_base) {
        setup_guest_base_seg();
    }
#endif
}
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0x90, count);
}
static void tcg_target_init(TCGContext *s)
{
#ifdef CONFIG_CPUID_H
    unsigned a, b, c, d, b7 = 0;
    int max = __get_cpuid_max(0, 0);

    if (max >= 7) {
        /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
        __cpuid_count(7, 0, a, b7, c, d);
        have_bmi1 = (b7 & bit_BMI) != 0;
        have_bmi2 = (b7 & bit_BMI2) != 0;
    }

    if (max >= 1) {
        __cpuid(1, a, b, c, d);
#ifndef have_cmov
        /* For 32-bit, 99% certainty that we're running on hardware that
           supports cmov, but we still need to check.  In case cmov is not
           available, we'll use a small forward branch. */
        have_cmov = (d & bit_CMOV) != 0;
#endif

        /* MOVBE is only available on Intel Atom and Haswell CPUs, so we
           need to probe for it. */
        have_movbe = (c & bit_MOVBE) != 0;
        have_popcnt = (c & bit_POPCNT) != 0;

        /* There are a number of things we must check before we can be
           sure of not hitting invalid opcode. */
        if (c & bit_OSXSAVE) {
            unsigned xcrl, xcrh;
            /* The xgetbv instruction is not available to older versions of
             * the assembler, so we encode the instruction manually.
             */
            asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0));
            if ((xcrl & 6) == 6) {
                have_avx1 = (c & bit_AVX) != 0;
                have_avx2 = (b7 & bit_AVX2) != 0;
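                /* (xcrl & 6) == 6 verifies that the OS has enabled both the
                   SSE state (XCR0 bit 1) and the AVX state (XCR0 bit 2) via
                   XSETBV; without that, executing AVX/AVX2 encodings would
                   raise #UD even though CPUID advertises the features.  */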
            }
        }
    }

    max = __get_cpuid_max(0x8000000, 0);
    if (max >= 1) {
        __cpuid(0x80000001, a, b, c, d);
        /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs. */
        have_lzcnt = (c & bit_LZCNT) != 0;
    }
#endif /* CONFIG_CPUID_H */

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
    }
    if (have_avx1) {
        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
    }
    if (have_avx2) {
        tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
    }

    tcg_target_call_clobber_regs = ALL_VECTOR_REGS;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
    if (TCG_TARGET_REG_BITS == 64) {
#if !defined(_WIN64)
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
#endif
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[14];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
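/* A uleb128 value that fits in two bytes can encode at most 14 bits
   (7 payload bits per byte), and fde_def_cfa below reserves exactly two
   bytes for the encoded frame size, hence the limit checked here.  */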
#if !defined(__ELF__)
    /* Host machine without ELF. */
#elif TCG_TARGET_REG_BITS == 64
#define ELF_HOST_MACHINE EM_X86_64
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x78,             /* sleb128 -8 */
    .h.cie.return_column = 16,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 7,                          /* DW_CFA_def_cfa %rsp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x90, 1,                        /* DW_CFA_offset, %rip, -8 */
        /* The following ordering must match tcg_target_callee_save_regs. */
        0x86, 2,                        /* DW_CFA_offset, %rbp, -16 */
        0x83, 3,                        /* DW_CFA_offset, %rbx, -24 */
        0x8c, 4,                        /* DW_CFA_offset, %r12, -32 */
        0x8d, 5,                        /* DW_CFA_offset, %r13, -40 */
        0x8e, 6,                        /* DW_CFA_offset, %r14, -48 */
        0x8f, 7,                        /* DW_CFA_offset, %r15, -56 */
    }
};
#else
#define ELF_HOST_MACHINE EM_386
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 8,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 4,                          /* DW_CFA_def_cfa %esp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x88, 1,                        /* DW_CFA_offset, %eip, -4 */
        /* The following ordering must match tcg_target_callee_save_regs. */
        0x85, 2,                        /* DW_CFA_offset, %ebp, -8 */
        0x83, 3,                        /* DW_CFA_offset, %ebx, -12 */
        0x86, 4,                        /* DW_CFA_offset, %esi, -16 */
        0x87, 5,                        /* DW_CFA_offset, %edi, -20 */
    }
};
#endif

#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif