/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"

/* We only support generating code for 64-bit mode.  */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif

/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case.  */
#define USE_LONG_BRANCHES 0

#define TCG_CT_CONST_MULI  0x100
#define TCG_CT_CONST_ORI   0x200
#define TCG_CT_CONST_XORI  0x400
#define TCG_CT_CONST_CMPI  0x800
#define TCG_CT_CONST_ADLI  0x1000
/* In several places within the instruction set, 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R14
#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R13
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
typedef enum S390Opcode {
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
};
/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};

#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OVF     1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15
/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variation, however since the test is vs zero we
   can re-map the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};

#ifdef CONFIG_SOFTMMU
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
#endif

static tcg_insn_unit *tb_ret_addr;

/* A list of relevant facilities used by this translator.  Some of these
   are required for proper operation, and these are checked at startup.  */

#define FACILITY_ZARCH_ACTIVE   (1ULL << (63 - 2))
#define FACILITY_LONG_DISP      (1ULL << (63 - 18))
#define FACILITY_EXT_IMM        (1ULL << (63 - 21))
#define FACILITY_GEN_INST_EXT   (1ULL << (63 - 34))
#define FACILITY_LOAD_ON_COND   (1ULL << (63 - 45))
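/* Facility number N, as reported by STORE FACILITY LIST (EXTENDED), is
   the Nth bit counting from the most-significant end of the list, i.e.
   bit (63 - N) of the first doubleword, hence the encoding above.  */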
static uint64_t facilities;
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
    assert(addend == -2);

    switch (type) {
    case R_390_PC16DBL:
        assert(pcrel2 == (int16_t)pcrel2);
        tcg_patch16(code_ptr, pcrel2);
        break;
    case R_390_PC32DBL:
        assert(pcrel2 == (int32_t)pcrel2);
        tcg_patch32(code_ptr, pcrel2);
        break;
    default:
        tcg_abort();
    }
}
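/* Relocated branch offsets are counted in halfwords from the start of
   the instruction.  VALUE arrives as a byte address, and CODE_PTR points
   at the 16-bit offset field one insn unit past the opcode halfword,
   hence the "code_ptr - 1" above and the fixed addend of -2.  */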
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;

    switch (ct_str[0]) {
    case 'r':                  /* all registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'R':                  /* not R0 */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;
    case 'L':                  /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        break;
    case 'a':                  /* force R2 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
        break;
    case 'b':                  /* force R3 for division */
        ct->ct |= TCG_CT_REG;
        tcg_regset_clear(ct->u.regs);
        tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
        break;
    case 'A':
        ct->ct |= TCG_CT_CONST_ADLI;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MULI;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_ORI;
        break;
    case 'X':
        ct->ct |= TCG_CT_CONST_XORI;
        break;
    case 'C':
        ct->ct |= TCG_CT_CONST_CMPI;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
/* Immediates to be used with logical OR.  This is an optimization only,
   since a full 64-bit immediate OR can always be performed with 4 sequential
   OI[LH][LH] instructions.  What we're looking for is immediates that we
   can load efficiently, and the immediate load plus the reg-reg OR is
   smaller than the sequential OI's.  */

static int tcg_match_ori(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (type == TCG_TYPE_I32) {
            /* All 32-bit ORs can be performed with 1 48-bit insn.  */
            return 1;
        }
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0) {
        if (val == (int16_t)val) {
            return 0;
        }
        if (facilities & FACILITY_EXT_IMM) {
            if (val == (int32_t)val) {
                return 0;
            }
        }
    }

    return 1;
}

/* Immediates to be used with logical XOR.  This is almost, but not quite,
   only an optimization.  XOR with immediate is only supported with the
   extended-immediate facility.  That said, there are a few patterns for
   which it is better to load the value into a register first.  */

static int tcg_match_xori(TCGType type, tcg_target_long val)
{
    if ((facilities & FACILITY_EXT_IMM) == 0) {
        return 0;
    }

    if (type == TCG_TYPE_I32) {
        /* All 32-bit XORs can be performed with 1 48-bit insn.  */
        return 1;
    }

    /* Look for negative values.  These are best to load with LGHI.  */
    if (val < 0 && val == (int32_t)val) {
        return 0;
    }

    return 1;
}

/* Immediates to be used with comparisons.  */

static int tcg_match_cmpi(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        /* The COMPARE IMMEDIATE instruction is available.  */
        if (type == TCG_TYPE_I32) {
            /* We have a 32-bit immediate and can compare against anything.  */
            return 1;
        } else {
            /* ??? We have no insight here into whether the comparison is
               signed or unsigned.  The COMPARE IMMEDIATE insn uses a 32-bit
               signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
               a 32-bit unsigned immediate.  If we were to use the (semi)
               obvious "val == (int32_t)val" we would be enabling unsigned
               comparisons vs very large numbers.  The only solution is to
               take the intersection of the ranges.  */
            /* ??? Another possible solution is to simply lie and allow all
               constants here and force the out-of-range values into a temp
               register in tgen_cmp when we have knowledge of the actual
               comparison code in use.  */
            return val >= 0 && val <= 0x7fffffff;
        }
    } else {
        /* Only the LOAD AND TEST instruction is available.  */
        return val == 0;
    }
}

/* Immediates to be used with add2/sub2.  */

static int tcg_match_add2i(TCGType type, tcg_target_long val)
{
    if (facilities & FACILITY_EXT_IMM) {
        if (type == TCG_TYPE_I32) {
            return 1;
        } else if (val >= -0xffffffffll && val <= 0xffffffffll) {
            return 1;
        }
    }
    return 0;
}

/* Test if a constant matches the constraint.  */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_MULI) {
        /* Immediates that may be used with multiply.  If we have the
           general-instruction-extensions, then we have MULTIPLY SINGLE
           IMMEDIATE with a signed 32-bit, otherwise we have only
           MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
        if (facilities & FACILITY_GEN_INST_EXT) {
            return val == (int32_t)val;
        } else {
            return val == (int16_t)val;
        }
    } else if (ct & TCG_CT_CONST_ADLI) {
        return tcg_match_add2i(type, val);
    } else if (ct & TCG_CT_CONST_ORI) {
        return tcg_match_ori(type, val);
    } else if (ct & TCG_CT_CONST_XORI) {
        return tcg_match_xori(type, val);
    } else if (ct & TCG_CT_CONST_CMPI) {
        return tcg_match_cmpi(type, val);
    }

    return 0;
}
/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
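/* For example, tcg_out_insn(s, RI, AGHI, r, v) expands to
   tcg_out_insn_RI(s, RI_AGHI, r, v); pairing an opcode with the wrong
   format fails to compile because the glued name does not exist.  */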
/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src != dst) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, LR, dst, src);
        } else {
            tcg_out_insn(s, RRE, LGR, dst, src);
        }
    }
}
/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    static const S390Opcode lli_insns[4] = {
        RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
    };

    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
            return;
        }
    }

    /* Try for PC-relative address load.  */
    if ((sval & 1) == 0) {
        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    }

    /* If extended immediates are not present, then we may have to issue
       several instructions to load the low 32 bits.  */
    if (!(facilities & FACILITY_EXT_IMM)) {
        /* A 32-bit unsigned value can be loaded in 2 insns.  And given
           that the lli_insns loop above did not succeed, we know that
           both insns are required.  */
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RI, LLILL, ret, uval);
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }

        /* If all high bits are set, the value can be loaded in 2 or 3 insns.
           We first want to make sure that all the high bits get set.  With
           luck the low 16-bits can be considered negative to perform that for
           free, otherwise we load an explicit -1.  */
        if (sval >> 31 >> 1 == -1) {
            if (uval & 0x8000) {
                tcg_out_insn(s, RI, LGHI, ret, uval);
            } else {
                tcg_out_insn(s, RI, LGHI, ret, -1);
                tcg_out_insn(s, RI, IILL, ret, uval);
            }
            tcg_out_insn(s, RI, IILH, ret, uval >> 16);
            return;
        }
    }

    /* If we get here, both the high and low parts have non-zero bits.  */

    /* Recurse to load the lower 32-bits.  */
    tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);

    /* Insert data into the high 32-bits.  */
    uval = uval >> 31 >> 1;
    if (facilities & FACILITY_EXT_IMM) {
        if (uval < 0x10000) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        } else if ((uval & 0xffff) == 0) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        } else {
            tcg_out_insn(s, RIL, IIHF, ret, uval);
        }
    } else {
        if (uval & 0xffff) {
            tcg_out_insn(s, RI, IIHL, ret, uval);
        }
        if (uval & 0xffff0000) {
            tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
        }
    }
}
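/* Note: the "uval >> 31 >> 1" spelling used above shifts by 32 in two
   steps.  Shifting by the full width of a type is undefined behaviour
   in C, so this common TCG idiom stays well-defined even if the operand
   type were only 32 bits wide.  */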
/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
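/* Example of the split above: ofs = 0xfffff gives low = -1, so TMP0 is
   loaded with 0x100000 and the insn itself uses displacement -1; both
   pieces stay within the signed 20-bit displacement of the RXY
   format.  */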
/* load data without address translation or endianness conversion */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
    }
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                              TCGReg base, intptr_t ofs)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
    }
}

/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
    intptr_t addr = (intptr_t)abs;

    if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
        ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }

    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}
static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}
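/* RISBG (ROTATE THEN INSERT SELECTED BITS) rotates the source left by
   OFS bits and inserts bits MSB..LSB of the rotated value into the
   destination; with Z set, the bits outside MSB..LSB are zeroed rather
   than preserved.  */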
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}

static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}

static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (facilities & FACILITY_EXT_IMM) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}
/* Accept bit patterns like these:
    0....01....1
    1....10....0
    1..10..01..1
    0..01..10..0
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}

static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (facilities & FACILITY_EXT_IMM) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    /* Fall back to loading the constant.  */
    tcg_out_movi(s, type, TCG_TMP0, val);
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}
static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    static const S390Opcode oi_insns[4] = {
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    /* Look for no-op.  */
    if (val == 0) {
        return;
    }

    if (facilities & FACILITY_EXT_IMM) {
        /* Try all 32-bit insns that can perform it in one go.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
                return;
            }
        }

        /* Try all 48-bit insns that can perform it in one go.  */
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = (0xffffffffull << i*32);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }

        /* Perform the OR via sequential modifications to the high and
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
           masks in each half.  */
        tgen64_ori(s, dest, val & 0x00000000ffffffffull);
        tgen64_ori(s, dest, val & 0xffffffff00000000ull);
    } else {
        /* With no extended-immediate facility, we don't need to be so
           clever.  Just iterate over the insns and mask in the constant.  */
        for (i = 0; i < 4; i++) {
            tcg_target_ulong mask = (0xffffull << i*16);
            if ((val & mask) != 0) {
                tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
            }
        }
    }
}

static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
    /* Perform the xor by parts.  */
    if (val & 0xffffffff) {
        tcg_out_insn(s, RIL, XILF, dest, val);
    }
    if (val > 0xffffffff) {
        tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
    }
}
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, int c2const)
{
    bool is_unsigned = is_unsigned_cond(c);
    if (c2const) {
        if (c2 == 0) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LTR, r1, r1);
            } else {
                tcg_out_insn(s, RRE, LTGR, r1, r1);
            }
            return tcg_cond_to_ltr_cond[c];
        } else {
            if (is_unsigned) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CLFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CLGFI, r1, c2);
                }
            } else {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RIL, CFI, r1, c2);
                } else {
                    tcg_out_insn(s, RIL, CGFI, r1, c2);
                }
            }
        }
    } else {
        if (is_unsigned) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CLR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CLGR, r1, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, CR, r1, c2);
            } else {
                tcg_out_insn(s, RRE, CGR, r1, c2);
            }
        }
    }
    return tcg_cond_to_s390_cond[c];
}
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;

    switch (cond) {
    case TCG_COND_GTU:
    case TCG_COND_GT:
    do_greater:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
        tgen_cmp(s, type, cond, c1, c2, c2const);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_GEU:
    do_geu:
        /* We need "real" carry semantics, so use SUBTRACT LOGICAL
           instead of COMPARE LOGICAL.  This needs an extra move.  */
        tcg_out_mov(s, type, TCG_TMP0, c1);
        if (c2const) {
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
            }
        } else {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
            } else {
                tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
            }
            tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        }
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_LEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
        /* Swap operands so that we can use GEU/GTU/GT.  */
        if (c2const) {
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        if (cond == TCG_COND_LEU) {
            goto do_geu;
        }
        cond = tcg_swap_cond(cond);
        goto do_greater;

    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
            goto do_greater;
        }
        break;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0 is 0 >= X.  */
        if (c2const && c2 == 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
            goto do_geu;
        }
        break;

    default:
        break;
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const);
    if (facilities & FACILITY_LOAD_ON_COND) {
        /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
        tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
    } else {
        /* Emit: d = 1; if (cc) goto over; d = 0; over:  */
        tcg_out_movi(s, type, dest, 1);
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_movi(s, type, dest, 0);
    }
}
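/* Worked example for the do_greater path above: COMPARE LOGICAL leaves
   CC=2 when the first operand is the larger, and ADD LOGICAL WITH CARRY
   treats bit 1 of the CC as carry-in, so ALCGR dest,dest computes
   0 + 0 + carry, which is exactly the boolean result.  */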
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
{
    int cc;
    if (facilities & FACILITY_LOAD_ON_COND) {
        cc = tgen_cmp(s, type, c, c1, c2, c2const);
        tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
    } else {
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const);

        /* Emit: if (cc) goto over; dest = r3; over:  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, r3);
    }
}

bool tcg_target_deposit_valid(int ofs, int len)
{
    return (facilities & FACILITY_GEN_INST_EXT) != 0;
}

static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
}
static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else if (USE_LONG_BRANCHES) {
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
        s->code_ptr += 2;
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
        s->code_ptr += 1;
    }
}
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    intptr_t off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, off);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_target_long off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (facilities & FACILITY_GEN_INST_EXT) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const);
    tgen_branch(s, cc, l);
}
static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    ptrdiff_t off = dest - s->code_ptr;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16u(s, TCG_TYPE_I64, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16s(s, TCG_TYPE_I64, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, LG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc) {
    case MO_UB:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        break;
    case MO_UW:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        break;
    case MO_UL:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;

    case MO_Q | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        break;
    case MO_Q:
        tcg_out_insn(s, RXY, STG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}
#if defined(CONFIG_SOFTMMU)
/* We're expecting to use a 20-bit signed offset on the tlb memory ops.
   Using the offset of the second entry in the last tlb table ensures
   that we can index all of the elements of the first entry.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
                  > 0x7ffff);
/* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
   addend into R2.  Returns a register with the sanitized guest address.  */
static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
                               int mem_index, bool is_ld)
{
    TCGMemOp s_bits = opc & MO_SIZE;
    uint64_t tlb_mask = TARGET_PAGE_MASK | ((1 << s_bits) - 1);
    int ofs;

    if (facilities & FACILITY_GEN_INST_EXT) {
        tcg_out_risbg(s, TCG_REG_R2, addr_reg,
                      64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
                      63 - CPU_TLB_ENTRY_BITS,
                      64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
        tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
    } else {
        tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, addr_reg);
        tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
                  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
    }

    if (is_ld) {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    } else {
        ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    }
    if (TARGET_LONG_BITS == 32) {
        tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    } else {
        tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
    }

    ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_REG_R3, addr_reg);
        return TCG_REG_R3;
    }
    return addr_reg;
}
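/* With the general-instruction-extensions facility, the RISBG above
   extracts the TLB index bits from the address and scales them into a
   table offset in a single instruction, replacing the shift-and-mask
   pair used on the fallback path.  */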
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                TCGReg data, TCGReg addr, int mem_index,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = data;
    label->addrlo_reg = addr;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOp opc = lb->opc;

    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, lb->mem_index);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_ld_helpers[opc]);
    tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg addr_reg = lb->addrlo_reg;
    TCGReg data_reg = lb->datalo_reg;
    TCGMemOp opc = lb->opc;

    patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
    }
    switch (opc & MO_SIZE) {
    case MO_UB:
        tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UW:
        tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    case MO_UL:
        tgen_ext32u(s, TCG_REG_R4, data_reg);
        break;
    case MO_Q:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
        break;
    default:
        tcg_abort();
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
    tcg_out_call(s, qemu_st_helpers[opc]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
}
#else
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    if (GUEST_BASE < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = GUEST_BASE;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
#endif /* CONFIG_SOFTMMU */
static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOp opc, int mem_index)
{
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);

    label_ptr = s->code_ptr + 1;
    tcg_out_insn(s, RI, BRC, S390_CC_NE, 0);

    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 1, opc, data_reg, addr_reg, mem_index,
                        s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOp opc, int mem_index)
{
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);

    label_ptr = s->code_ptr + 1;
    tcg_out_insn(s, RI, BRC, S390_CC_NE, 0);

    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 0, opc, data_reg, addr_reg, mem_index,
                        s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;

    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}
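/* In the softmmu paths above, the BRC is emitted with a zero
   displacement as a placeholder: label_ptr records the address of its
   16-bit offset field, which the slow path fills in via patch_reloc
   once the helper call sequence has been placed.  */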
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    S390Opcode op;
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* return value */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
        break;

    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
            s->code_ptr += 2;
        } else {
            /* load address stored at s->tb_next + args[0] */
            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
            /* and go there */
            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
        }
        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
        break;
    OP_32_64(ld8u):
        /* ??? LLC (RXY format) is only present with the extended-immediate
           facility, whereas LLGC is always present.  */
        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld8s):
        /* ??? LB is no smaller than LGB, so no point to using it.  */
        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld16u):
        /* ??? LLH (RXY format) is only present with the extended-immediate
           facility, whereas LLGH is always present.  */
        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld16s_i32:
        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    OP_32_64(st16):
        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
        do_addi_32:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AHI, a0, a2);
                    break;
                }
                if (facilities & FACILITY_EXT_IMM) {
                    tcg_out_insn(s, RIL, AFI, a0, a2);
                    break;
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, AR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        }
        tcg_out_insn(s, RR, SR, args[0], args[2]);
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
        } else {
            tcg_out_insn(s, RR, NR, args[0], args[2]);
        }
        break;
    case INDEX_op_or_i32:
        if (const_args[2]) {
            tgen64_ori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, OR, args[0], args[2]);
        }
        break;
    case INDEX_op_xor_i32:
        if (const_args[2]) {
            tgen64_xori(s, args[0], args[2] & 0xffffffff);
        } else {
            tcg_out_insn(s, RR, XR, args[0], args[2]);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_insn(s, RR, LCR, args[0], args[1]);
        break;
    case INDEX_op_mul_i32:
        if (const_args[2]) {
            if ((int32_t)args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i32:
        tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
        break;

    case INDEX_op_shl_i32:
        op = RS_SLL;
    do_shift32:
        if (const_args[2]) {
            tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh32(s, op, args[0], args[2], 0);
        }
        break;
    case INDEX_op_shr_i32:
        op = RS_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        op = RS_SRA;
        goto do_shift32;

    case INDEX_op_rotl_i32:
        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
                         TCG_REG_NONE, (32 - args[2]) & 31);
        } else {
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
        }
        break;
    case INDEX_op_ext8s_i32:
        tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
        break;

    OP_32_64(bswap16):
        /* The TCG bswap definition requires bits 0-47 already be zero.
           Thus we don't need the G-type insns to implement bswap16_i64.  */
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
        break;
    OP_32_64(bswap32):
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        break;

    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, ALR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, SLR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
        break;
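    /* Note for add2/sub2 above: the low-part ALR/SLR (or the immediate
       forms ALFI/SLFI) sets the carry/borrow in the condition code, and
       the high-part ALCR/SLBR consumes it, forming a 64-bit operation
       from a 32-bit register pair.  */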
    case INDEX_op_br:
        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
        break;

    case INDEX_op_brcond_i32:
        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i32:
        /* ??? Technically we can use a non-extending instruction.  */
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_ld16s_i64:
        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_st32_i64:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AGHI, a0, a2);
                    break;
                }
                if (facilities & FACILITY_EXT_IMM) {
                    if (a2 == (int32_t)a2) {
                        tcg_out_insn(s, RIL, AGFI, a0, a2);
                        break;
                    } else if (a2 == (uint32_t)a2) {
                        tcg_out_insn(s, RIL, ALGFI, a0, a2);
                        break;
                    } else if (-a2 == (uint32_t)-a2) {
                        tcg_out_insn(s, RIL, SLGFI, a0, -a2);
                        break;
                    }
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, AGR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;

    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out_insn(s, RRE, SGR, args[0], args[2]);
        }
        break;
    case INDEX_op_and_i64:
        if (const_args[2]) {
            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, NGR, args[0], args[2]);
        }
        break;
    case INDEX_op_or_i64:
        if (const_args[2]) {
            tgen64_ori(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, OGR, args[0], args[2]);
        }
        break;
    case INDEX_op_xor_i64:
        if (const_args[2]) {
            tgen64_xori(s, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRE, XGR, args[0], args[2]);
        }
        break;

    case INDEX_op_neg_i64:
        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
        break;
    case INDEX_op_mul_i64:
        if (const_args[2]) {
            if (args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MGHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i64:
        /* ??? We get an unnecessary sign-extension of the dividend
           into R3 with this definition, but as we do in fact always
           produce both quotient and remainder using INDEX_op_div_i64
           instead requires jumping through even more hoops.  */
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_mulu2_i64:
        tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
        break;

    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate.  */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;
    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_add2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3]);
        break;

    OP_32_64(deposit):
        tgen_deposit(s, args[0], args[2], args[3], args[4]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef s390_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "rK" } },

    { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },

    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "rO" } },
    { INDEX_op_xor_i32, { "r", "0", "rX" } },

    { INDEX_op_neg_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "0", "Ri" } },
    { INDEX_op_shr_i32, { "r", "0", "Ri" } },
    { INDEX_op_sar_i32, { "r", "0", "Ri" } },

    { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i32, { "r", "rC" } },
    { INDEX_op_setcond_i32, { "r", "r", "rC" } },
    { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i32, { "r", "0", "r" } },

    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },

    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "ri" } },
    { INDEX_op_sub_i64, { "r", "0", "ri" } },
    { INDEX_op_mul_i64, { "r", "0", "rK" } },

    { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
    { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },

    { INDEX_op_and_i64, { "r", "0", "ri" } },
    { INDEX_op_or_i64, { "r", "0", "rO" } },
    { INDEX_op_xor_i64, { "r", "0", "rX" } },

    { INDEX_op_neg_i64, { "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "Ri" } },
    { INDEX_op_shr_i64, { "r", "r", "Ri" } },
    { INDEX_op_sar_i64, { "r", "r", "Ri" } },

    { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "Ri" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },

    { INDEX_op_brcond_i64, { "r", "rC" } },
    { INDEX_op_setcond_i64, { "r", "r", "rC" } },
    { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    { -1 },
};
static void query_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        register int r0 __asm__("0");
        register void *r1 __asm__("1");

        /* stfle 0(%r1) */
        r1 = &facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}
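/* The raw ".word 0xb2b0,0x1000" above is STFLE 0(%r1), spelled out so
   that it assembles even with toolchains that predate the mnemonic:
   0xb2b0 is the opcode and 0x1000 encodes base register %r1 with a zero
   displacement.  On input, r0 holds the number of doublewords provided
   at the store address, minus one.  */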
static void tcg_target_init(TCGContext *s)
{
    query_facilities();

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The r6 register is technically call-saved, but it's also a parameter
       register, so it can get killed by setup for the qemu_st helper.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    /* The return register can be considered call-clobbered.  */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we better avoid it for now */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(s390_op_defs);
}
#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                           + TCG_STATIC_CALL_ARGS_SIZE           \
                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    if (GUEST_BASE >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}
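/* Everything from the point where tb_ret_addr is recorded down to the
   final BCR is the common epilogue: INDEX_op_exit_tb loads the return
   value into %r2 and branches here via tgen_gotoi.  */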
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#define ELF_HOST_MACHINE  EM_S390
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}